From a3bde921bafcd7a790c59b2753aa90839afa6ee7 Mon Sep 17 00:00:00 2001
From: Davide Galassi
Date: Wed, 9 Aug 2023 18:28:48 +0200
Subject: [PATCH 01/27] Companion for #14412 (#7547)

* Companion for 14412

* update lockfile for {"substrate"}

* Trigger CI

---------

Co-authored-by: parity-processbot <>
---
 Cargo.lock | 652 +++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 452 insertions(+), 200 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 86284315686c..2ef64180f1dd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -246,6 +246,164 @@ version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880"
 
+[[package]]
+name = "ark-bls12-381"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c775f0d12169cba7aae4caeb547bb6a50781c7449a8aa53793827c9ec4abf488"
+dependencies = [
+ "ark-ec",
+ "ark-ff",
+ "ark-serialize",
+ "ark-std",
+]
+
+[[package]]
+name = "ark-ec"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba"
+dependencies = [
+ "ark-ff",
+ "ark-poly",
+ "ark-serialize",
+ "ark-std",
+ "derivative",
+ "hashbrown 0.13.2",
+ "itertools",
+ "num-traits",
+ "zeroize",
+]
+
+[[package]]
+name = "ark-ed-on-bls12-381-bandersnatch"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f9cde0f2aa063a2a5c28d39b47761aa102bda7c13c84fc118a61b87c7b2f785c"
+dependencies = [
+ "ark-bls12-381",
+ "ark-ec",
+ "ark-ff",
+ "ark-std",
+]
+
+[[package]]
+name = "ark-ff"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba"
+dependencies = [
+ "ark-ff-asm",
+ "ark-ff-macros",
+ "ark-serialize",
+ "ark-std",
+ "derivative",
+ "digest 0.10.7",
+ "itertools",
+ "num-bigint",
+ "num-traits",
+ "paste",
+ "rustc_version",
+ "zeroize",
+]
+
+[[package]]
+name = "ark-ff-asm"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348"
+dependencies = [
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "ark-ff-macros"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565"
+dependencies = [
+ "num-bigint",
+ "num-traits",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "ark-poly"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf"
+dependencies = [
+ "ark-ff",
+ "ark-serialize",
+ "ark-std",
+ "derivative",
+ "hashbrown 0.13.2",
+]
+
+[[package]]
+name = "ark-secret-scalar"
+version = "0.0.2"
+source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801"
+dependencies = [
+ "ark-ec",
+ "ark-ff",
+ "ark-serialize",
+ "ark-std",
+ "ark-transcript",
+ "digest 0.10.7",
+ "rand_core 0.6.4",
+ "zeroize",
+]
+
+[[package]]
+name = "ark-serialize"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5"
+dependencies = [
+ "ark-serialize-derive",
+ "ark-std",
+ "digest 0.10.7",
+ "num-bigint",
+]
+
+[[package]]
+name = 
"ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-transcript" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ff", + "ark-serialize", + "ark-std", + "digest 0.10.7", + "rand_core 0.6.4", + "sha3", +] + [[package]] name = "array-bytes" version = "6.1.0" @@ -396,6 +554,27 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "bandersnatch_vrfs" +version = "0.0.1" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-bls12-381", + "ark-ec", + "ark-ed-on-bls12-381-bandersnatch", + "ark-ff", + "ark-serialize", + "ark-std", + "dleq_vrf", + "fflonk", + "merlin 3.0.0", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "ring 0.1.0", + "sha2 0.10.7", + "zeroize", +] + [[package]] name = "base-x" version = "0.2.8" @@ -438,7 +617,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "log", @@ -1012,6 +1191,20 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "common" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "common-path" version = "1.0.0" @@ -1685,6 +1878,22 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31ad93652f40969dead8d4bf897a41e9462095152eb21c56e5830537e41179dd" +[[package]] +name = "dleq_vrf" +version = "0.0.2" +source = "git+https://github.com/w3f/ring-vrf?rev=c86ebd4#c86ebd4114d3165d05f9ce28c1d9e8d7a9a4e801" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-secret-scalar", + "ark-serialize", + "ark-std", + "ark-transcript", + "arrayvec 0.7.4", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "dlmalloc" version = "0.2.4" @@ -1702,18 +1911,18 @@ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "docify" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6491709f76fb7ceb951244daf624d480198b427556084391d6e3c33d3ae74b9" +checksum = "029de870d175d11969524d91a3fb2cbf6d488b853bff99d41cf65e533ac7d9d2" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc5338a9f72ce29a81377d9039798fcc926fb471b2004666caf48e446dffbbf" +checksum = "cac43324656a1b05eb0186deb51f27d2d891c704c37f34de281ef6297ba193e5" dependencies = [ "common-path", "derive-syn-parse", @@ -1723,6 +1932,7 @@ dependencies = [ "regex", "syn 2.0.20", "termcolor", + "toml 0.7.3", "walkdir", ] @@ -2131,6 +2341,19 
@@ dependencies = [ "subtle", ] +[[package]] +name = "fflonk" +version = "0.1.0" +source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "merlin 3.0.0", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -2223,7 +2446,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", ] @@ -2246,7 +2469,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-support-procedural", @@ -2271,7 +2494,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "array-bytes", @@ -2319,7 +2542,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2330,7 +2553,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2347,7 +2570,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -2376,7 +2599,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-recursion", "futures", @@ -2397,7 +2620,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "aquamarine", "bitflags", @@ -2434,7 +2657,7 @@ dependencies = [ [[package]] name = 
"frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "cfg-expr", @@ -2452,7 +2675,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2464,7 +2687,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro2", "quote", @@ -2474,7 +2697,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2501,7 +2724,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -2514,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "cfg-if", "frame-support", @@ -2533,7 +2756,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -2548,7 +2771,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sp-api", @@ -2557,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "parity-scale-codec", @@ -2739,7 +2962,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "chrono", "frame-election-provider-support", @@ -4548,6 +4771,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "mick-jaeger" version = "0.1.8" @@ -4594,7 +4829,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "log", @@ -4613,7 +4848,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "anyhow", "jsonrpsee", @@ -5139,7 +5374,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5154,7 +5389,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5170,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5184,7 +5419,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5208,7 +5443,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5228,7 +5463,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5247,7 +5482,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5262,7 +5497,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5281,7 +5516,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5305,7 +5540,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5323,7 +5558,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5342,7 +5577,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5359,7 +5594,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5376,7 +5611,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5394,7 +5629,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies 
= [ "frame-benchmarking", "frame-election-provider-support", @@ -5417,7 +5652,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5430,7 +5665,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5449,7 +5684,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "docify", "frame-benchmarking", @@ -5468,7 +5703,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5491,7 +5726,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5507,7 +5742,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5527,7 +5762,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5544,7 +5779,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5561,7 +5796,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5580,7 +5815,7 @@ dependencies = [ [[package]] 
name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5597,7 +5832,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5613,7 +5848,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5864,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5648,7 +5883,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5668,7 +5903,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5679,7 +5914,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5696,7 +5931,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5720,7 +5955,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5737,7 +5972,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5752,7 +5987,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5770,7 +6005,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5785,7 +6020,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5804,7 +6039,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5821,7 +6056,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -5842,7 +6077,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5858,12 +6093,11 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "hex-literal 0.3.4", "log", "parity-scale-codec", "rand_chacha 0.2.2", @@ -5877,7 +6111,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5900,7 +6134,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5911,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "sp-arithmetic", @@ -5920,7 +6154,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sp-api", @@ -5929,7 +6163,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5946,7 +6180,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5961,7 +6195,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5979,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -5998,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-support", "frame-system", @@ -6014,7 +6248,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6030,7 +6264,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" 
+source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6042,7 +6276,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6059,7 +6293,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6074,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6090,7 +6324,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6105,7 +6339,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-benchmarking", "frame-support", @@ -6764,7 +6998,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.11.0", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "polkadot-node-jaeger", @@ -8762,6 +8996,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.1.0" +source = "git+https://github.com/w3f/ring-proof#0e948f3c28cbacecdd3020403c4841c0eb339213" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "common", + "fflonk", + "merlin 3.0.0", +] + [[package]] name = "ring" version = "0.16.20" @@ -9010,7 +9259,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", - "ring", + "ring 0.16.20", "sct", "webpki", ] @@ -9022,7 +9271,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" dependencies = [ "log", - "ring", + "ring 0.16.20", "rustls-webpki", "sct", ] @@ -9063,7 +9312,7 @@ version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -9111,7 +9360,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "sp-core", @@ -9122,7 +9371,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -9150,7 +9399,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "futures-timer", @@ -9173,7 +9422,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9188,7 +9437,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9207,7 +9456,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9218,7 +9467,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "chrono", @@ -9257,7 +9506,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "fnv", "futures", @@ -9283,7 +9532,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "kvdb", @@ -9309,7 +9558,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -9334,7 +9583,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "fork-tree", @@ -9370,7 +9619,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "jsonrpsee", @@ -9392,7 +9641,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-channel", @@ -9426,7 +9675,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "jsonrpsee", @@ -9445,7 +9694,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9458,7 +9707,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9499,7 +9748,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "finality-grandpa", "futures", @@ -9519,7 +9768,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -9542,7 +9791,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9564,7 +9813,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9576,7 +9825,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "anyhow", "cfg-if", @@ -9593,7 +9842,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ansi_term", "futures", @@ -9609,7 +9858,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9623,7 +9872,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-channel", @@ -9666,7 +9915,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-channel", "cid", @@ -9686,7 +9935,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "bitflags", @@ -9703,7 +9952,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ahash 0.8.2", "futures", @@ -9722,7 +9971,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-channel", @@ -9743,7 +9992,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-channel", @@ -9777,7 +10026,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = 
"0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "futures", @@ -9795,7 +10044,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "bytes", @@ -9829,7 +10078,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9838,7 +10087,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "jsonrpsee", @@ -9869,7 +10118,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -9888,7 +10137,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "http", "jsonrpsee", @@ -9903,7 +10152,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "futures", @@ -9916,6 +10165,7 @@ dependencies = [ "sc-chain-spec", "sc-client-api", "sc-transaction-pool-api", + "sc-utils", "serde", "sp-api", "sp-blockchain", @@ -9929,7 +10179,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "directories", @@ -9993,7 +10243,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "log", "parity-scale-codec", @@ -10004,7 +10254,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" 
+source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "clap 4.2.5", "fs4", @@ -10018,7 +10268,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10037,7 +10287,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "libc", @@ -10056,7 +10306,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "chrono", "futures", @@ -10075,7 +10325,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ansi_term", "atty", @@ -10104,7 +10354,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10115,7 +10365,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -10141,7 +10391,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -10157,7 +10407,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-channel", "futures", @@ -10226,7 +10476,7 @@ dependencies = [ "arrayvec 0.5.2", "curve25519-dalek 2.1.3", "getrandom 0.1.16", - "merlin", + "merlin 2.0.1", "rand 0.7.3", "rand_core 0.5.1", "sha2 0.8.2", @@ -10252,7 +10502,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] @@ -10516,9 +10766,9 @@ dependencies = [ [[package]] 
name = "sha3" -version = "0.10.0" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f935e31cf406e8c0e96c2815a5516181b7004ae8c5f296293221e9b1e356bd" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ "digest 0.10.7", "keccak", @@ -10660,7 +10910,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", - "ring", + "ring 0.16.20", "rustc_version", "sha2 0.10.7", "subtle", @@ -10705,7 +10955,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "log", @@ -10726,7 +10976,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "blake2", @@ -10740,7 +10990,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -10753,7 +11003,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "integer-sqrt", "num-traits", @@ -10767,7 +11017,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -10780,7 +11030,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sp-api", "sp-inherents", @@ -10791,7 +11041,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "log", @@ -10809,7 +11059,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "futures", @@ -10824,7 +11074,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -10841,7 +11091,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -10860,7 +11110,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "lazy_static", "parity-scale-codec", @@ -10879,7 +11129,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "finality-grandpa", "log", @@ -10897,7 +11147,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -10909,9 +11159,11 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", + "arrayvec 0.7.4", + "bandersnatch_vrfs", "bitflags", "blake2", "bounded-collections", @@ -10925,7 +11177,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "merlin", + "merlin 2.0.1", "parity-scale-codec", "parking_lot 0.12.1", "paste", @@ -10954,7 +11206,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "blake2b_simd", "byteorder", @@ -10967,7 +11219,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "quote", "sp-core-hashing", @@ -10977,7 +11229,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10986,7 +11238,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" 
version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro2", "quote", @@ -10996,7 +11248,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "environmental", "parity-scale-codec", @@ -11007,7 +11259,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "serde_json", "sp-api", @@ -11018,7 +11270,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11032,7 +11284,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "bytes", "ed25519", @@ -11057,7 +11309,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "lazy_static", "sp-core", @@ -11068,7 +11320,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11080,7 +11332,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11089,7 +11341,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11100,7 +11352,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11118,7 +11370,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -11132,7 +11384,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sp-api", "sp-core", @@ -11142,7 +11394,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "backtrace", "lazy_static", @@ -11152,7 +11404,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "rustc-hash", "serde", @@ -11162,7 +11414,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "either", "hash256-std-hasher", @@ -11184,7 +11436,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11202,7 +11454,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "Inflector", "proc-macro-crate", @@ -11214,7 +11466,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -11229,7 +11481,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11243,7 +11495,7 @@ dependencies = [ [[package]] name = "sp-state-machine" 
version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hash-db", "log", @@ -11264,7 +11516,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11288,12 +11540,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11306,7 +11558,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -11319,7 +11571,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "sp-std", @@ -11331,7 +11583,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "sp-api", "sp-runtime", @@ -11340,7 +11592,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "parity-scale-codec", @@ -11355,7 +11607,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11378,7 +11630,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11395,7 +11647,7 @@ dependencies = [ 
[[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11406,7 +11658,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11419,7 +11671,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "parity-scale-codec", "scale-info", @@ -11644,12 +11896,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11668,7 +11920,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "hyper", "log", @@ -11680,7 +11932,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "jsonrpsee", @@ -11693,7 +11945,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11710,7 +11962,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "array-bytes", "async-trait", @@ -11736,7 +11988,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11746,7 +11998,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11757,7 +12009,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "ansi_term", "build-helper", @@ -12627,7 +12879,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e966352d118a745678f720ae85e617d054dc8165" +source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" dependencies = [ "async-trait", "clap 4.2.5", @@ -13304,7 +13556,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "ring", + "ring 0.16.20", "untrusted", ] From fe3c92333b712a9a40315c38601c50bce0d794eb Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 10 Aug 2023 15:29:43 +1000 Subject: [PATCH 02/27] Remove unused code in runtime/polkadot/src/lib.rs (#7540) * remove SetStorageVersions runtime upgrade * remove unused imports --- runtime/polkadot/src/lib.rs | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 585e48dd5a4b..ac031671a4e6 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1488,35 +1488,12 @@ pub type Migrations = migrations::Unreleased; #[allow(deprecated, missing_docs)] pub mod migrations { use super::*; - use frame_support::traits::{GetStorageVersion, OnRuntimeUpgrade, StorageVersion}; /// Unreleased migrations. Add new ones here: pub type Unreleased = ( pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, ); - - /// Migrations that set `StorageVersion`s we missed to set. - pub struct SetStorageVersions; - - impl OnRuntimeUpgrade for SetStorageVersions { - fn on_runtime_upgrade() -> Weight { - // `Referenda` pallet was added on chain after the migration to version `1` was added. - // Thus, it never required the migration and we just missed to set the correct `StorageVersion`. - let storage_version = Referenda::on_chain_storage_version(); - if storage_version < 1 { - StorageVersion::new(1).put::(); - } - - // Was missed as part of: `runtime_common::session::migration::ClearOldSessionStorage`. - let storage_version = Historical::on_chain_storage_version(); - if storage_version < 1 { - StorageVersion::new(1).put::(); - } - - RocksDbWeight::get().reads_writes(2, 2) - } - } } /// Unchecked extrinsic type as expected by this runtime. 
From 95c77cc51fa1b95b31395524f33ec1f6011aef52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Thu, 10 Aug 2023 10:42:43 +0200 Subject: [PATCH 03/27] Companion for substrate#12970 (#6807) * Runtime companion changes * updates runtime configs * Fixes runtime-test runtime configs * Uses ElectionBounds and builder from own mod * updates new bounds mod * Fixes test-runtime mock * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 368 ++++++++++++++++---------------- runtime/kusama/src/lib.rs | 20 +- runtime/polkadot/src/lib.rs | 21 +- runtime/test-runtime/src/lib.rs | 17 +- runtime/westend/src/lib.rs | 19 +- 5 files changed, 225 insertions(+), 220 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ef64180f1dd..1ad880641445 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -617,7 +617,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "log", @@ -2446,7 +2446,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", ] @@ -2469,7 +2469,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-support-procedural", @@ -2494,7 +2494,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "array-bytes", @@ -2542,7 +2542,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -2599,7 +2599,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-recursion", "futures", @@ -2620,7 +2620,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "aquamarine", "bitflags", @@ -2657,7 +2657,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "cfg-expr", @@ -2675,7 +2675,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2687,7 +2687,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro2", "quote", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2724,7 +2724,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -2737,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "cfg-if", "frame-support", @@ -2756,7 +2756,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", 
"frame-support", @@ -2771,7 +2771,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sp-api", @@ -2780,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "parity-scale-codec", @@ -2962,7 +2962,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "chrono", "frame-election-provider-support", @@ -4829,7 +4829,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "log", @@ -4848,7 +4848,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "anyhow", "jsonrpsee", @@ -5374,7 +5374,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5405,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5419,7 +5419,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5443,7 +5443,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5463,7 +5463,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5482,7 +5482,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5497,7 +5497,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5516,7 +5516,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5540,7 +5540,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5558,7 +5558,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5577,7 +5577,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5594,7 +5594,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5611,7 +5611,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" 
+source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5629,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5652,7 +5652,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5665,7 +5665,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5684,7 +5684,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "docify", "frame-benchmarking", @@ -5703,7 +5703,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5742,7 +5742,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5762,7 +5762,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5779,7 +5779,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5796,7 +5796,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5815,7 +5815,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5832,7 +5832,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5848,7 +5848,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5864,7 +5864,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5883,7 +5883,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5903,7 +5903,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5914,7 +5914,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -5931,7 +5931,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" 
dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5955,7 +5955,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5972,7 +5972,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -5987,7 +5987,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6005,7 +6005,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6020,7 +6020,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6039,7 +6039,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6056,7 +6056,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -6077,7 +6077,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6093,7 +6093,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6111,7 +6111,7 @@ dependencies = [ [[package]] name = "pallet-staking" version 
= "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6134,7 +6134,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6145,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "sp-arithmetic", @@ -6154,7 +6154,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sp-api", @@ -6163,7 +6163,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6180,7 +6180,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6195,7 +6195,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6213,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6232,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-support", "frame-system", @@ -6248,7 +6248,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6264,7 +6264,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6276,7 +6276,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6293,7 +6293,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6308,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6324,7 +6324,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -6339,7 +6339,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-benchmarking", "frame-support", @@ -9360,7 +9360,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "sp-core", @@ -9371,7 +9371,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -9399,7 +9399,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "futures-timer", @@ -9422,7 +9422,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9437,7 +9437,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9456,7 +9456,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9467,7 +9467,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "chrono", @@ -9506,7 +9506,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "fnv", "futures", @@ -9532,7 +9532,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "kvdb", @@ -9558,7 +9558,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -9583,7 +9583,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "fork-tree", @@ -9619,7 +9619,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "jsonrpsee", @@ -9641,7 +9641,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -9675,7 +9675,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "jsonrpsee", @@ -9694,7 +9694,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9707,7 +9707,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9748,7 +9748,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "finality-grandpa", "futures", @@ -9768,7 +9768,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -9791,7 +9791,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9813,7 +9813,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9825,7 +9825,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "anyhow", "cfg-if", @@ -9842,7 +9842,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ansi_term", "futures", @@ -9858,7 +9858,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9872,7 +9872,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -9915,7 +9915,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-channel", "cid", @@ -9935,7 +9935,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "bitflags", @@ -9952,7 +9952,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ahash 0.8.2", "futures", @@ -9971,7 +9971,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -9992,7 +9992,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-channel", @@ -10026,7 +10026,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "futures", @@ -10044,7 +10044,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "bytes", @@ -10078,7 +10078,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10087,7 +10087,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "jsonrpsee", @@ -10118,7 +10118,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10137,7 +10137,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "http", "jsonrpsee", @@ -10152,7 +10152,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "futures", @@ -10179,7 +10179,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "directories", @@ -10243,7 +10243,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "log", "parity-scale-codec", @@ -10254,7 +10254,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "clap 4.2.5", "fs4", @@ -10268,7 +10268,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10287,7 +10287,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "libc", @@ 
-10306,7 +10306,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "chrono", "futures", @@ -10325,7 +10325,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ansi_term", "atty", @@ -10354,7 +10354,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10365,7 +10365,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -10391,7 +10391,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -10407,7 +10407,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-channel", "futures", @@ -10955,7 +10955,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "log", @@ -10976,7 +10976,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "blake2", @@ -10990,7 +10990,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11003,7 +11003,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "integer-sqrt", "num-traits", @@ -11017,7 +11017,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11030,7 +11030,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sp-api", "sp-inherents", @@ -11041,7 +11041,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "log", @@ -11059,7 +11059,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "futures", @@ -11074,7 +11074,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11091,7 +11091,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11110,7 +11110,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11129,7 +11129,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "finality-grandpa", "log", @@ -11147,7 +11147,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11159,7 +11159,7 @@ dependencies = [ [[package]] 
name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11206,7 +11206,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "blake2b_simd", "byteorder", @@ -11219,7 +11219,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "quote", "sp-core-hashing", @@ -11229,7 +11229,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11238,7 +11238,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro2", "quote", @@ -11248,7 +11248,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "environmental", "parity-scale-codec", @@ -11259,7 +11259,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "serde_json", "sp-api", @@ -11270,7 +11270,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11284,7 +11284,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "bytes", "ed25519", @@ -11309,7 +11309,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" 
dependencies = [ "lazy_static", "sp-core", @@ -11320,7 +11320,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11332,7 +11332,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11341,7 +11341,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11352,7 +11352,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11370,7 +11370,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11384,7 +11384,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sp-api", "sp-core", @@ -11394,7 +11394,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "backtrace", "lazy_static", @@ -11404,7 +11404,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "rustc-hash", "serde", @@ -11414,7 +11414,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "either", "hash256-std-hasher", @@ -11436,7 +11436,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11454,7 +11454,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "Inflector", "proc-macro-crate", @@ -11466,7 +11466,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11481,7 +11481,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11495,7 +11495,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hash-db", "log", @@ -11516,7 +11516,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11540,12 +11540,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11558,7 +11558,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11571,7 +11571,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "sp-std", @@ -11583,7 +11583,7 @@ dependencies = [ 
[[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "sp-api", "sp-runtime", @@ -11592,7 +11592,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "parity-scale-codec", @@ -11607,7 +11607,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11630,7 +11630,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11647,7 +11647,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11658,7 +11658,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11671,7 +11671,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "parity-scale-codec", "scale-info", @@ -11896,12 +11896,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11920,7 +11920,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "hyper", "log", @@ -11932,7 +11932,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "jsonrpsee", @@ -11945,7 +11945,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11962,7 +11962,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "array-bytes", "async-trait", @@ -11988,7 +11988,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11998,7 +11998,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12009,7 +12009,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "ansi_term", "build-helper", @@ -12879,7 +12879,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#19eb56a3fc51140b269e339ecb7e9a4a378c26ff" +source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" dependencies = [ "async-trait", "clap 4.2.5", diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 335ef79fab58..0248b02e12f6 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -53,7 +53,8 @@ use runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use frame_election_provider_support::{ - generate_solution_type, onchain, NposSolution, SequentialPhragmen, + bounds::ElectionBoundsBuilder, generate_solution_type, onchain, NposSolution, + SequentialPhragmen, }; use frame_support::{ construct_runtime, parameter_types, @@ -399,11 +400,12 @@ parameter_types! 
{ // 1 hour session, 15 minutes unsigned phase, 8 offchain executions. pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 8; - /// We take the top 12500 nominators as electing voters.. pub const MaxElectingVoters: u32 = 12_500; - /// ... and all of the validators as electable targets. Whilst this is the case, we cannot and - /// shall not increase the size of the validator intentions. - pub const MaxElectableTargets: u16 = u16::MAX; + /// We take the top 12500 nominators as electing voters and all of the validators as electable + /// targets. Whilst this is the case, we cannot and shall not increase the size of the + /// validator intentions. + pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); pub NposSolutionPriority: TransactionPriority = Perbill::from_percent(90) * TransactionPriority::max_value(); /// Setup election pallet to support maximum winners upto 2000. This will mean Staking Pallet @@ -428,8 +430,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; type MaxWinners = MaxActiveValidators; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; + type Bounds = ElectionBounds; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -495,9 +496,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = runtime_common::elections::BenchmarkConfig; type ForceOrigin = EitherOf, StakingAdmin>; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; - type MaxElectingVoters = MaxElectingVoters; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxActiveValidators; + type ElectionBounds = ElectionBounds; } parameter_types! { @@ -564,7 +564,6 @@ parameter_types! { } impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -586,6 +585,7 @@ impl pallet_staking::Config for Runtime { type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type VoterList = VoterList; type TargetList = UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index ac031671a4e6..fbf896cdedc5 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -40,7 +40,9 @@ use runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use frame_election_provider_support::{generate_solution_type, onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::ElectionBoundsBuilder, generate_solution_type, onchain, SequentialPhragmen, +}; use frame_support::{ construct_runtime, parameter_types, traits::{ @@ -393,11 +395,12 @@ parameter_types! { // 4 hour session, 1 hour unsigned phase, 32 offchain executions. pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 32; - /// We take the top 22500 nominators as electing voters.. pub const MaxElectingVoters: u32 = 22_500; - /// ... 
and all of the validators as electable targets. Whilst this is the case, we cannot and - /// shall not increase the size of the validator intentions. - pub const MaxElectableTargets: u16 = u16::MAX; + /// We take the top 22500 nominators as electing voters and all of the validators as electable + /// targets. Whilst this is the case, we cannot and shall not increase the size of the + /// validator intentions. + pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); /// Setup election pallet to support maximum winners upto 1200. This will mean Staking Pallet /// cannot have active validators higher than this count. pub const MaxActiveValidators: u32 = 1200; @@ -420,8 +423,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; type MaxWinners = MaxActiveValidators; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; + type Bounds = ElectionBounds; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -487,9 +489,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = runtime_common::elections::BenchmarkConfig; type ForceOrigin = EitherOf, StakingAdmin>; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; - type MaxElectingVoters = MaxElectingVoters; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxActiveValidators; + type ElectionBounds = ElectionBounds; } parameter_types! { @@ -572,7 +573,6 @@ impl pallet_staking::EraPayout for EraPayout { } impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -594,6 +594,7 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 9e2f2a66455b..c9f3aa6cb203 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -36,7 +36,10 @@ use polkadot_runtime_parachains::{ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; use frame_support::{ construct_runtime, parameter_types, traits::{Everything, KeyOwnerProofSystem, WithdrawReasons}, @@ -315,8 +318,8 @@ parameter_types! { pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; pub const OnChainMaxWinners: u32 = u32::MAX; - pub const MaxElectingVoters: u32 = u32::MAX; - pub const MaxElectableTargets: u16 = u16::MAX; + // Unbounded number of election targets and voters. 
+ pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } pub struct OnChainSeqPhragmen; @@ -325,13 +328,14 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type Bounds = ElectionBoundsOnChain; type MaxWinners = OnChainMaxWinners; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; } +/// Upper limit on the number of NPOS nominations. +const MAX_QUOTA_NOMINATIONS: u32 = 16; + impl pallet_staking::Config for Runtime { - type MaxNominations = frame_support::pallet_prelude::ConstU32<16>; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -355,6 +359,7 @@ impl pallet_staking::Config for Runtime { // to bags-list is a no-op, but the storage version will be updated. type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index dd4bcff32e39..4b4659442cff 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -22,7 +22,7 @@ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ construct_runtime, parameter_types, traits::{ @@ -371,11 +371,12 @@ parameter_types! { // 1 hour session, 15 minutes unsigned phase, 4 offchain executions. pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 4; - /// We take the top 22500 nominators as electing voters.. pub const MaxElectingVoters: u32 = 22_500; - /// ... and all of the validators as electable targets. Whilst this is the case, we cannot and - /// shall not increase the size of the validator intentions. - pub const MaxElectableTargets: u16 = u16::MAX; + /// We take the top 22500 nominators as electing voters and all of the validators as electable + /// targets. Whilst this is the case, we cannot and shall not increase the size of the + /// validator intentions. 
+ pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); // Maximum winners that can be chosen as active validators pub const MaxActiveValidators: u32 = 1000; @@ -398,8 +399,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; type MaxWinners = MaxActiveValidators; - type VotersBound = MaxElectingVoters; - type TargetsBound = MaxElectableTargets; + type Bounds = ElectionBounds; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -465,9 +465,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = runtime_common::elections::BenchmarkConfig; type ForceOrigin = EnsureRoot; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; - type MaxElectingVoters = MaxElectingVoters; - type MaxElectableTargets = MaxElectableTargets; type MaxWinners = MaxActiveValidators; + type ElectionBounds = ElectionBounds; } parameter_types! { @@ -508,7 +507,6 @@ parameter_types! { } impl pallet_staking::Config for Runtime { - type MaxNominations = MaxNominations; type Currency = Balances; type CurrencyBalance = Balance; type UnixTime = Timestamp; @@ -530,6 +528,7 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; From c2efce0d60d656cb13f5ade52bf3d866e17085fa Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 10 Aug 2023 13:17:24 +0200 Subject: [PATCH 04/27] Add counter for unapproved candidates (#7491) * Add counter for unapproved candidates * Update metrics * Split metrics * Remove depth metric * Print only the oldest unapproved candidates * Update logging condition * Fix logging condition * Update logging * Update node/core/approval-voting/src/lib.rs Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --------- Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- node/core/approval-voting/src/lib.rs | 29 ++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index a6a74da50480..05b92f459529 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -160,6 +160,7 @@ struct MetricsInner { time_db_transaction: prometheus::Histogram, time_recover_and_approve: prometheus::Histogram, candidate_signatures_requests_total: prometheus::Counter, + unapproved_candidates_in_unfinalized_chain: prometheus::Gauge, } /// Approval Voting metrics. 
@@ -246,6 +247,12 @@ impl Metrics { fn time_recover_and_approve(&self) -> Option { self.0.as_ref().map(|metrics| metrics.time_recover_and_approve.start_timer()) } + + fn on_unapproved_candidates_in_unfinalized_chain(&self, count: usize) { + if let Some(metrics) = &self.0 { + metrics.unapproved_candidates_in_unfinalized_chain.set(count as u64); + } + } } impl metrics::Metrics for Metrics { @@ -336,6 +343,13 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + unapproved_candidates_in_unfinalized_chain: prometheus::register( + prometheus::Gauge::new( + "polkadot_parachain_approval_unapproved_candidates_in_unfinalized_chain", + "Number of unapproved candidates in unfinalized chain", + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) @@ -1298,6 +1312,7 @@ async fn handle_from_overseer( lower_bound, wakeups, &mut approved_ancestor_span, + &metrics, ) .await { @@ -1423,9 +1438,11 @@ async fn handle_approved_ancestor( lower_bound: BlockNumber, wakeups: &Wakeups, span: &mut jaeger::Span, + metrics: &Metrics, ) -> SubsystemResult> { const MAX_TRACING_WINDOW: usize = 200; const ABNORMAL_DEPTH_THRESHOLD: usize = 5; + const LOGGING_DEPTH_THRESHOLD: usize = 10; let mut span = span .child("handle-approved-ancestor") .with_stage(jaeger::Stage::ApprovalChecking); @@ -1471,6 +1488,7 @@ async fn handle_approved_ancestor( } else { Vec::new() }; + let ancestry_len = ancestry.len(); let mut block_descriptions = Vec::new(); @@ -1534,6 +1552,17 @@ async fn handle_approved_ancestor( unapproved.len(), entry.candidates().len(), ); + if ancestry_len >= LOGGING_DEPTH_THRESHOLD && i > ancestry_len - LOGGING_DEPTH_THRESHOLD + { + gum::trace!( + target: LOG_TARGET, + ?block_hash, + "Unapproved candidates at depth {}: {:?}", + bits.len(), + unapproved + ) + } + metrics.on_unapproved_candidates_in_unfinalized_chain(unapproved.len()); entry_span.add_uint_tag("unapproved-candidates", unapproved.len() as u64); for candidate_hash in unapproved { match db.load_candidate_entry(&candidate_hash)? { From b420bad817e68e9b60f0956ba66f6520bfe92cae Mon Sep 17 00:00:00 2001 From: Chevdor Date: Fri, 11 Aug 2023 15:28:39 +0200 Subject: [PATCH 05/27] Publish RC container images (#7556) * WIP * Add missing checkout * Add debuggin * Fix VAR name * Bug fix * Rework jobs * Revert "Rework jobs" This reverts commit 2bfa79fd3ae633c17403b838f9a5025f0f7fc3f3. 
* Add cache * Add temp default for testing * Add missing checkout * Fix patch * Comment out the GPG check for now * Rename polkadot_injected_release into a more appropriate polkadot_injected_debian * Refactoring / renaming * Introduce a generic image for binary injection * Flag files to be deleted and changes to be done * WIP * Fix multi binaries images * Add test build scripts * Remove old file, add polkadot build-injected script * Fix doc * Fix tagging * Add build of the injected container * Fix for docker * Remove the need for TTY * Handling container publishing * Fix owner and registry * Fix vars * Fix repo * Fix var naming * Fix case when there is no tag * Fix case with no tag * Handle error * Fix spacings * Fix tags * Remove unnecessary grep that may fail * Add final check * Clean up and introduce GPG check * Add doc * Add doc * Update doc/docker.md Co-authored-by: Mira Ressel * type Co-authored-by: Mira Ressel * Fix used VAR * Improve doc * ci: Update .build-push-image jobs to use the new build-injected.sh * ci: fix path to build-injected.sh script * Rename the release artifacts folder to prevent confusion due to a similar folder in the gitlab CI * ci: check out polkadot repo in .build-push-image This seems far cleaner than copying the entire scripts/ folder into our job artifacts. * feat(build-injected.sh): make PROJECT_ROOT configurable This lets us avoid a dependency on git in our CI image. * ci: build injected images with buildah * ci: pass full image names to zombienet * Add missing ignore --------- Co-authored-by: Mira Ressel --- .github/workflows/check-licenses.yml | 2 +- .../workflows/release-40_publish-rc-image.yml | 132 ++++++++++++++++++ .../release-50_publish-docker-release.yml | 2 +- .../release-51_publish-docker-manual.yml | 2 +- .gitignore | 4 + .gitlab-ci.yml | 36 +++-- doc/docker.md | 86 +++++++----- scripts/ci/common/lib.sh | 70 ++++++++++ .../adder-collator/build-injected.sh | 13 ++ .../dockerfiles/adder-collator/test-build.sh | 23 +++ .../ci/dockerfiles/binary_injected.Dockerfile | 48 +++++++ scripts/ci/dockerfiles/build-injected.sh | 92 ++++++++++++ .../dockerfiles/collator_injected.Dockerfile | 49 ------- scripts/ci/dockerfiles/entrypoint.sh | 18 +++ .../ci/dockerfiles/malus/build-injected.sh | 14 ++ scripts/ci/dockerfiles/malus/test-build.sh | 19 +++ .../ci/dockerfiles/malus_injected.Dockerfile | 50 ------- scripts/ci/dockerfiles/polkadot/README.md | 2 + .../ci/dockerfiles/polkadot/build-injected.sh | 13 ++ scripts/ci/dockerfiles/polkadot/build.sh | 27 ---- .../dockerfiles/polkadot/docker-compose.yml | 13 +- .../polkadot_Dockerfile.README.md | 0 .../polkadot/polkadot_builder.Dockerfile | 2 +- .../polkadot_injected_debian.Dockerfile} | 2 +- scripts/ci/dockerfiles/polkadot/test-build.sh | 18 +++ .../polkadot_injected_debug.Dockerfile | 48 ------- .../ci/dockerfiles/staking-miner/README.md | 37 +++++ .../staking-miner/build-injected.sh | 13 ++ scripts/ci/dockerfiles/staking-miner/build.sh | 13 ++ .../staking-miner_builder.Dockerfile | 11 +- .../staking-miner_injected.Dockerfile | 43 ------ .../dockerfiles/staking-miner/test-build.sh | 18 +++ scripts/ci/gitlab/pipeline/build.yml | 4 - scripts/ci/gitlab/pipeline/publish.yml | 54 +++---- utils/staking-miner/README.md | 6 +- 35 files changed, 661 insertions(+), 323 deletions(-) create mode 100644 .github/workflows/release-40_publish-rc-image.yml create mode 100755 scripts/ci/dockerfiles/adder-collator/build-injected.sh create mode 100755 scripts/ci/dockerfiles/adder-collator/test-build.sh create mode 100644 
scripts/ci/dockerfiles/binary_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/build-injected.sh delete mode 100644 scripts/ci/dockerfiles/collator_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/entrypoint.sh create mode 100755 scripts/ci/dockerfiles/malus/build-injected.sh create mode 100755 scripts/ci/dockerfiles/malus/test-build.sh delete mode 100644 scripts/ci/dockerfiles/malus_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/polkadot/build-injected.sh delete mode 100755 scripts/ci/dockerfiles/polkadot/build.sh rename scripts/ci/dockerfiles/{ => polkadot}/polkadot_Dockerfile.README.md (100%) rename scripts/ci/dockerfiles/{polkadot_injected_release.Dockerfile => polkadot/polkadot_injected_debian.Dockerfile} (95%) create mode 100755 scripts/ci/dockerfiles/polkadot/test-build.sh delete mode 100644 scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile create mode 100644 scripts/ci/dockerfiles/staking-miner/README.md create mode 100755 scripts/ci/dockerfiles/staking-miner/build-injected.sh create mode 100755 scripts/ci/dockerfiles/staking-miner/build.sh delete mode 100644 scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile create mode 100755 scripts/ci/dockerfiles/staking-miner/test-build.sh diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index a4c8d5d97424..522037b6827c 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout sources - uses: actions/checkout@v3.3.0 + uses: actions/checkout@v3 - uses: actions/setup-node@v3.7.0 with: node-version: '18.x' diff --git a/.github/workflows/release-40_publish-rc-image.yml b/.github/workflows/release-40_publish-rc-image.yml new file mode 100644 index 000000000000..a821eaa033fd --- /dev/null +++ b/.github/workflows/release-40_publish-rc-image.yml @@ -0,0 +1,132 @@ +name: Release - Publish RC Container image +# see https://github.com/paritytech/release-engineering/issues/97#issuecomment-1651372277 + +on: + workflow_dispatch: + inputs: + release_id: + description: | + Release ID. + You can find it using the command: + curl -s \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \ + jq '.[] | { name: .name, id: .id }' + required: true + type: string + registry: + description: "Container registry" + required: true + type: string + default: docker.io + owner: + description: Owner of the container image repo + required: true + type: string + default: parity + +env: + RELEASE_ID: ${{ inputs.release_id }} + ENGINE: docker + REGISTRY: ${{ inputs.registry }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }} + REPO: ${{ github.repository }} + ARTIFACT_FOLDER: release-artifacts + +jobs: + fetch-artifacts: + runs-on: ubuntu-latest + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Fetch all artifacts + run: | + . 
./scripts/ci/common/lib.sh + fetch_release_artifacts + + - name: Cache the artifacts + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + with: + key: artifacts-${{ github.sha }} + path: | + ${ARTIFACT_FOLDER}/**/* + + build-container: + runs-on: ubuntu-latest + needs: fetch-artifacts + + strategy: + matrix: + binary: ["polkadot", "staking-miner"] + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Get artifacts from cache + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + with: + key: artifacts-${{ github.sha }} + path: | + ${ARTIFACT_FOLDER}/**/* + + - name: Check sha256 ${{ matrix.binary }} + working-directory: ${ARTIFACT_FOLDER} + run: | + . ../scripts/ci/common/lib.sh + + echo "Checking binary ${{ matrix.binary }}" + check_sha256 ${{ matrix.binary }} && echo "OK" || echo "ERR" + + - name: Check GPG ${{ matrix.binary }} + working-directory: ${ARTIFACT_FOLDER} + run: | + . ../scripts/ci/common/lib.sh + import_gpg_keys + check_gpg ${{ matrix.binary }} + + - name: Fetch commit and tag + id: fetch_refs + run: | + release=release-${{ inputs.release_id }} && \ + echo "release=${release}" >> $GITHUB_OUTPUT + + commit=$(git rev-parse --short HEAD) && \ + echo "commit=${commit}" >> $GITHUB_OUTPUT + + tag=$(git name-rev --tags --name-only $(git rev-parse HEAD)) && \ + [ "${tag}" != "undefined" ] && echo "tag=${tag}" >> $GITHUB_OUTPUT || \ + echo "No tag, doing without" + + - name: Build Injected Container image for ${{ matrix.binary }} + env: + BIN_FOLDER: ${ARTIFACT_FOLDER} + BINARY: ${{ matrix.binary }} + TAGS: ${{join(steps.fetch_refs.outputs.*, ',')}} + run: | + echo "Building container for ${{ matrix.binary }}" + ./scripts/ci/dockerfiles/build-injected.sh + + - name: Login to Dockerhub + uses: docker/login-action@v2 + with: + username: ${{ inputs.owner }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Push Container image for ${{ matrix.binary }} + id: docker_push + env: + BINARY: ${{ matrix.binary }} + run: | + $ENGINE images | grep ${BINARY} + $ENGINE push --all-tags ${REGISTRY}/${DOCKER_OWNER}/${BINARY} + + - name: Check version for the published image for ${{ matrix.binary }} + env: + BINARY: ${{ matrix.binary }} + RELEASE_TAG: ${{ steps.fetch_refs.outputs.release }} + run: | + echo "Checking tag ${RELEASE_TAG} for image ${REGISTRY}/${DOCKER_OWNER}/${BINARY}" + $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version diff --git a/.github/workflows/release-50_publish-docker-release.yml b/.github/workflows/release-50_publish-docker-release.yml index a6bf19162a46..81e5caa718f3 100644 --- a/.github/workflows/release-50_publish-docker-release.yml +++ b/.github/workflows/release-50_publish-docker-release.yml @@ -30,7 +30,7 @@ jobs: uses: docker/build-push-action@v4 with: push: true - file: scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile + file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile tags: | parity/polkadot:latest parity/polkadot:${{ github.event.release.tag_name }} diff --git a/.github/workflows/release-51_publish-docker-manual.yml b/.github/workflows/release-51_publish-docker-manual.yml index 0c973d33b71c..919769f8700d 100644 --- a/.github/workflows/release-51_publish-docker-manual.yml +++ b/.github/workflows/release-51_publish-docker-manual.yml @@ -37,7 +37,7 @@ jobs: uses: docker/build-push-action@v4 with: push: true - file: scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile + file: 
scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile tags: | parity/polkadot:latest parity/polkadot:${{ github.event.inputs.version }} diff --git a/.gitignore b/.gitignore index 0c6913dac340..61ef9e91a55e 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,7 @@ polkadot.* !polkadot.service .DS_Store .env + +artifacts +release-artifacts +release.json diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5056012e588e..5a84bbfeba85 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -159,31 +159,39 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .build-push-image: + variables: + CI_IMAGE: "${BUILDAH_IMAGE}" + + REGISTRY: "docker.io" + DOCKER_OWNER: "paritypr" + DOCKER_USER: "${PARITYPR_USER}" + DOCKER_PASS: "${PARITYPR_PASS}" + IMAGE: "${REGISTRY}/${DOCKER_OWNER}/${IMAGE_NAME}" + + ENGINE: "${BUILDAH_COMMAND}" + BUILDAH_FORMAT: "docker" + SKIP_IMAGE_VALIDATION: 1 + + PROJECT_ROOT: "." + BIN_FOLDER: "./artifacts" + VCS_REF: "${CI_COMMIT_SHA}" + before_script: - !reference [.common-before-script, before_script] - test -s ./artifacts/VERSION || exit 1 - test -s ./artifacts/EXTRATAG || exit 1 - - VERSION="$(cat ./artifacts/VERSION)" + - export VERSION="$(cat ./artifacts/VERSION)" - EXTRATAG="$(cat ./artifacts/EXTRATAG)" - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" script: - test "$DOCKER_USER" -a "$DOCKER_PASS" || ( echo "no docker credentials provided"; exit 1 ) - - cd ./artifacts - - $BUILDAH_COMMAND build - --format=docker - --build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --build-arg IMAGE_NAME="${IMAGE_NAME}" - --tag "$IMAGE_NAME:$VERSION" - --tag "$IMAGE_NAME:$EXTRATAG" - --file ${DOCKERFILE} . - # The job will success only on the protected branch + - TAGS="${VERSION},${EXTRATAG}" scripts/ci/dockerfiles/build-injected.sh - echo "$DOCKER_PASS" | - buildah login --username "$DOCKER_USER" --password-stdin docker.io + buildah login --username "$DOCKER_USER" --password-stdin "${REGISTRY}" - $BUILDAH_COMMAND info - - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$VERSION" - - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$EXTRATAG" + - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$VERSION" + - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$EXTRATAG" after_script: - buildah logout --all diff --git a/doc/docker.md b/doc/docker.md index e8b7fa74732e..f20c2d001edd 100644 --- a/doc/docker.md +++ b/doc/docker.md @@ -1,43 +1,58 @@ -# Using Docker +# Using Containers + +The following commands should work no matter if you use Docker or Podman. In general, Podman is recommended. All commands are "engine neutral" so you can use the container engine of your choice while still being able to copy/paste the commands below. + +Let's start defining Podman as our engine: +``` +ENGINE=podman +``` + +If you prefer to stick with Docker, use: +``` +ENGINE=docker +``` ## The easiest way -The easiest/faster option to run Polkadot in Docker is to use the latest release images. These are small images that use the latest official release of the Polkadot binary, pulled from our package repository. +The easiest/faster option to run Polkadot in Docker is to use the latest release images. These are small images that use the latest official release of the Polkadot binary, pulled from our Debian package. -**_Following examples are running on westend chain and without SSL. They can be used to quick start and learn how Polkadot needs to be configured. 
Please find out how to secure your node, if you want to operate it on the internet. Do not expose RPC and WS ports, if they are not correctly configured._** +**_The following examples are running on the westend chain and without SSL. They can be used to quickly get started and learn how Polkadot needs to be configured. Please find out how to secure your node, if you want to operate it on the internet. Do not expose RPC and WS ports, if they are not correctly configured._** Let's first check the version we have. The first time you run this command, the Polkadot docker image will be downloaded. This takes a bit of time and bandwidth, be patient: ```bash -docker run --rm -it parity/polkadot:latest --version +$ENGINE run --rm -it parity/polkadot:latest --version ``` You can also pass any argument/flag that Polkadot supports: ```bash -docker run --rm -it parity/polkadot:latest --chain westend --name "PolkaDocker" +$ENGINE run --rm -it parity/polkadot:latest --chain westend --name "PolkaDocker" ``` ## Examples -Once you are done experimenting and picking the best node name :) you can start Polkadot as daemon, exposes the Polkadot ports and mount a volume that will keep your blockchain data locally. Make sure that you set the ownership of your local directory to the Polkadot user that is used by the container. Set user id 1000 and group id 1000, by running `chown 1000.1000 /my/local/folder -R` if you use a bind mount. - -To start a Polkadot node on default rpc port 9933 and default p2p port 30333 use the following command. If you want to connect to rpc port 9933, then must add Polkadot startup parameter: `--rpc-external`. +Once you are done experimenting and picking the best node name :) you can start Polkadot as a daemon, expose the Polkadot ports and mount a volume that will keep your blockchain data locally. Make sure that you set the ownership of your local directory to the Polkadot user that is used by the container. -```bash -docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/polkadot parity/polkadot:latest --chain westend --rpc-external --rpc-cors all -``` +Set user id 1000 and group id 1000, by running `chown 1000.1000 /my/local/folder -R` if you use a bind mount. -Additionally if you want to have custom node name you can add the `--name "YourName"` at the end +To start a Polkadot node on the default rpc port 9933 and the default p2p port 30333, use the following command. If you want to connect to rpc port 9933, you must add the Polkadot startup parameter: `--rpc-external`. 
```bash -docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/polkadot parity/polkadot:latest --chain westend --rpc-external --rpc-cors all --name "PolkaDocker" +$ENGINE run -d -p 30333:30333 -p 9933:9933 \ + -v /my/local/folder:/polkadot \ + parity/polkadot:latest \ + --chain westend --rpc-external --rpc-cors all \ + --name "PolkaDocker" ``` If you also want to expose the webservice port 9944 use the following command: ```bash -docker run -d -p 30333:30333 -p 9933:9933 -p 9944:9944 -v /my/local/folder:/polkadot parity/polkadot:latest --chain westend --ws-external --rpc-external --rpc-cors all --name "PolkaDocker" +$ENGINE run -d -p 30333:30333 -p 9933:9933 -p 9944:9944 \ + -v /my/local/folder:/polkadot \ + parity/polkadot:latest \ + --chain westend --ws-external --rpc-external --rpc-cors all --name "PolkaDocker" ``` ## Using Docker compose @@ -55,17 +70,19 @@ services: - 30333:30333 # p2p port - 9933:9933 # rpc port - 9944:9944 # ws port + - 9615:9615 # Prometheus port volumes: - /my/local/folder:/polkadot command: [ "--name", "PolkaDocker", "--ws-external", "--rpc-external", + "--prometheus-external", "--rpc-cors", "all" ] ``` -With following docker-compose.yml you can set up a node and use polkadot-js-apps as the front end on port 80. After starting the node use a browser and enter your Docker host IP in the URL field: __ +With the following `docker-compose.yml` you can set up a node and use polkadot-js-apps as the front end on port 80. After starting the node use a browser and enter your Docker host IP in the URL field: __ ```bash version: '2' @@ -78,10 +95,12 @@ services: - 30333:30333 # p2p port - 9933:9933 # rpc port - 9944:9944 # ws port + - 9615:9615 # Prometheus port command: [ "--name", "PolkaDocker", "--ws-external", "--rpc-external", + "--prometheus-external", "--rpc-cors", "all" ] @@ -100,27 +119,30 @@ Chain syncing will utilize all available memory and CPU power your server has to If running on a low resource VPS, use `--memory` and `--cpus` to limit the resources used. E.g. To allow a maximum of 512MB memory and 50% of 1 CPU, use `--cpus=".5" --memory="512m"`. Read more about limiting a container's resources [here](https://docs.docker.com/config/containers/resource_constraints). -Start a shell session with the daemon: -```bash -docker exec -it $(docker ps -q) bash; -``` +## Build your own image -Check the current version: +There are 3 options to build a polkadot container image: +- using the builder image +- using the injected "Debian" image +- using the generic injected image -```bash -polkadot --version -``` +### Builder image ## Build your own image -To get up and running with the smallest footprint on your system, you may use the Polkadot Docker image. -You can build it yourself (it takes a while...) in the shell session of the daemon: +To get up and running with the smallest footprint on your system, you may use an existing Polkadot Container image. -```bash -cd scripts/ci/dockerfiles/polkadot -./build.sh -``` +You may also build a polkadot container image yourself (it takes a while...) using the container specs `scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile`. - +### Debian injected + +The Debian injected image is how the official polkadot container image is produced. It relies on the Debian package that is published upon each release. The Debian injected image is usually available a few minutes after a new release is published. +It has the benefit of relying on the GPG signatures embedded in the Debian package.
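+As an illustration, the same GPG and sha256 checks that the release pipeline performs can be run by hand with the helpers added in `scripts/ci/common/lib.sh` (a minimal sketch, assuming `lib.sh` is sourced from a repository checkout and that the `polkadot` binary together with its `polkadot.asc` and `polkadot.sha256` files already sits in the current directory):
+
+```bash
+# Sketch only: source the release helpers and verify a downloaded binary.
+. scripts/ci/common/lib.sh
+
+import_gpg_keys                      # import the release-team keys from the keyserver
+check_sha256 polkadot && echo "OK"   # compares polkadot against polkadot.sha256
+check_gpg polkadot                   # verifies polkadot against polkadot.asc
+```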
+ +### Generic injected + +For simple testing purposes, the easiest option, for polkadot as well as for arbitrary binaries, is to use the `binary_injected.Dockerfile` container spec. This option is less secure since the injected binary is not checked at all, but it has the benefit of being simple. This option requires you to already have a valid `polkadot` binary, compiled for Linux. + +This binary is then simply copied inside the `parity/base-bin` image. ## Reporting issues @@ -128,8 +150,8 @@ If you run into issues with Polkadot when using docker, please run the following command (replace the tag with the appropriate one if you do not use latest): ```bash -docker run --rm -it parity/polkadot:latest --version +$ENGINE run --rm -it parity/polkadot:latest --version ``` This will show you the Polkadot version as well as the git commit ref that was used to build your container. -Just paste that in the issue you create. +You can now paste the version information in a [new issue](https://github.com/paritytech/polkadot/issues/new/choose). diff --git a/scripts/ci/common/lib.sh b/scripts/ci/common/lib.sh index 2e94feb150ce..00abe9a1d8d4 100755 --- a/scripts/ci/common/lib.sh +++ b/scripts/ci/common/lib.sh @@ -193,3 +193,73 @@ check_bootnode(){ echo " Bootnode appears unreachable" return 1 } + +# Assumes the following ENV variables are set: +# - RELEASE_ID +# - GITHUB_TOKEN +# - REPO in the form paritytech/polkadot +fetch_release_artifacts() { + echo "Release ID : $RELEASE_ID" + echo "Repo : $REPO" + echo "ARTIFACT_FOLDER: $ARTIFACT_FOLDER" + + curl -L -s \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${GITHUB_TOKEN}" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/${REPO}/releases/$RELEASE_ID > release.json + + # Get Asset ids + ids=($(jq -r '.assets[].id' < release.json )) + count=$(jq '.assets|length' < release.json ) + + # Fetch artifacts + mkdir -p ${ARTIFACT_FOLDER} + pushd ${ARTIFACT_FOLDER} > /dev/null + + iter=1 + for id in "${ids[@]}" + do + echo " - $iter/$count: downloading asset id: $id..." 
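+    # Requesting the asset id with "Accept: application/octet-stream" makes the API
+    # return the raw file (via a redirect); -L follows it and -OJ saves the asset
+    # under the filename advertised by the server.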
+ curl -s -OJ -L -H "Accept: application/octet-stream" \ + -H "Authorization: Token ${GITHUB_TOKEN}" \ + "https://api.github.com/repos/${REPO}/releases/assets/$id" + iter=$((iter + 1)) + done + + ls -al --color + popd > /dev/null +} + +# Check the checksum for a given binary +function check_sha256() { + echo "Checking SHA256 for $1" + shasum -qc $1.sha256 +} + +# Import GPG keys of the release team members +# This is done in parallel as it can take a while sometimes +function import_gpg_keys() { + GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"} + SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798" + WILL="2835EAF92072BC01D188AF2C4A092B93E97CE1E2" + EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3" + MARA="533C920F40E73A21EEB7E9EBF27AEA7E7594C9CF" + MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF" + + echo "Importing GPG keys from $GPG_KEYSERVER in parallel" + for key in $SEC $WILL $EGOR $MARA $MORGAN; do + ( + echo "Importing GPG key $key" + gpg --no-tty --quiet --keyserver $GPG_KEYSERVER --recv-keys $key + echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key $key trust; + ) & + done + wait +} + +# Check the GPG signature for a given binary +function check_gpg() { + echo "Checking GPG Signature for $1" + gpg --no-tty --verify -q $1.asc $1 +} diff --git a/scripts/ci/dockerfiles/adder-collator/build-injected.sh b/scripts/ci/dockerfiles/adder-collator/build-injected.sh new file mode 100755 index 000000000000..9a1857bc7ab4 --- /dev/null +++ b/scripts/ci/dockerfiles/adder-collator/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=adder-collator,undying-collator +export BIN_FOLDER=$1 + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/adder-collator/test-build.sh b/scripts/ci/dockerfiles/adder-collator/test-build.sh new file mode 100755 index 000000000000..171e0309f807 --- /dev/null +++ b/scripts/ci/dockerfiles/adder-collator/test-build.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# TODO: Switch to /bin/bash when the image is built from parity/base-bin + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /usr/bin/bash \ + paritypr/colander:master -c \ + 'cp "$(which adder-collator)" /export' + +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /usr/bin/bash \ + paritypr/colander:master -c \ + 'cp "$(which undying-collator)" /export' + +./build-injected.sh $TMP diff --git a/scripts/ci/dockerfiles/binary_injected.Dockerfile b/scripts/ci/dockerfiles/binary_injected.Dockerfile new file mode 100644 index 000000000000..cee81a2eb8ae --- /dev/null +++ b/scripts/ci/dockerfiles/binary_injected.Dockerfile @@ -0,0 +1,48 @@ +FROM docker.io/parity/base-bin + +# This file allows building a Generic container image +# based on one or multiple pre-built Linux binaries. +# Some defaults are set to polkadot but all can be overriden. + +SHELL ["/bin/bash", "-c"] + +# metadata +ARG VCS_REF +ARG BUILD_DATE +ARG IMAGE_NAME + +# That can be a single one or a comma separated list +ARG BINARY=polkadot + +ARG BIN_FOLDER=. 
+ARG DOC_URL=https://github.com/paritytech/polkadot +ARG DESCRIPTION="Polkadot: a platform for web3" +ARG AUTHORS="devops-team@parity.io" +ARG VENDOR="Parity Technologies" + +LABEL io.parity.image.authors=${AUTHORS} \ + io.parity.image.vendor="${VENDOR}" \ + io.parity.image.revision="${VCS_REF}" \ + io.parity.image.title="${IMAGE_NAME}" \ + io.parity.image.created="${BUILD_DATE}" \ + io.parity.image.documentation="${DOC_URL}" \ + io.parity.image.description="${DESCRIPTION}" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/binary_injected.Dockerfile" + +USER root +WORKDIR /app + +# add polkadot binary to docker image +# sample for polkadot: COPY ./polkadot ./polkadot-*-worker /usr/local/bin/ +COPY entrypoint.sh . +COPY "bin/*" "/usr/local/bin/" +RUN chmod -R a+rx "/usr/local/bin" + +USER parity +ENV BINARY=${BINARY} + +# ENTRYPOINT +ENTRYPOINT ["/app/entrypoint.sh"] + +# We call the help by default +CMD ["--help"] diff --git a/scripts/ci/dockerfiles/build-injected.sh b/scripts/ci/dockerfiles/build-injected.sh new file mode 100755 index 000000000000..d0e7fee3646e --- /dev/null +++ b/scripts/ci/dockerfiles/build-injected.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +set -e + +# This script allows building a Container Image from a Linux +# binary that is injected into a base-image. + +ENGINE=${ENGINE:-podman} + +if [ "$ENGINE" == "podman" ]; then + PODMAN_FLAGS="--format docker" +else + PODMAN_FLAGS="" +fi + +CONTEXT=$(mktemp -d) +REGISTRY=${REGISTRY:-docker.io} + +# The following line ensures we know the project root +PROJECT_ROOT=${PROJECT_ROOT:-$(git rev-parse --show-toplevel)} +DOCKERFILE=${DOCKERFILE:-$PROJECT_ROOT/scripts/ci/dockerfiles/binary_injected.Dockerfile} +VERSION_TOML=$(grep "^version " $PROJECT_ROOT/Cargo.toml | grep -oE "([0-9\.]+-?[0-9]+)") + +# The following VARs have defaults that can be overridden +DOCKER_OWNER=${DOCKER_OWNER:-parity} + +# We may get 1..n binaries, comma separated +BINARY=${BINARY:-polkadot} +IFS=',' read -r -a BINARIES <<< "$BINARY" + +VERSION=${VERSION:-$VERSION_TOML} +BIN_FOLDER=${BIN_FOLDER:-.} + +IMAGE=${IMAGE:-${REGISTRY}/${DOCKER_OWNER}/${BINARIES[0]}} +DESCRIPTION_DEFAULT="Injected Container image built for ${BINARY}" +DESCRIPTION=${DESCRIPTION:-$DESCRIPTION_DEFAULT} + +VCS_REF=${VCS_REF:-01234567} + +# Build the image +echo "Using engine: $ENGINE" +echo "Using Dockerfile: $DOCKERFILE" +echo "Using context: $CONTEXT" +echo "Building ${IMAGE}:latest container image for ${BINARY} v${VERSION} from ${BIN_FOLDER} hang on!" 
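+# The temporary CONTEXT directory becomes the container build context: the requested
+# binaries and the shared entrypoint.sh are copied into it below, before the engine
+# is invoked with the generic binary_injected.Dockerfile.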
+echo "BIN_FOLDER=$BIN_FOLDER" +echo "CONTEXT=$CONTEXT" + +# We need all binaries and resources available in the Container build "CONTEXT" +mkdir -p $CONTEXT/bin +for bin in "${BINARIES[@]}" +do + echo "Copying $BIN_FOLDER/$bin to context: $CONTEXT/bin" + cp "$BIN_FOLDER/$bin" "$CONTEXT/bin" +done + +cp "$PROJECT_ROOT/scripts/ci/dockerfiles/entrypoint.sh" "$CONTEXT" + +echo "Building image: ${IMAGE}" + +TAGS=${TAGS[@]:-latest} +IFS=',' read -r -a TAG_ARRAY <<< "$TAGS" +TAG_ARGS=" " + +echo "The image ${IMAGE} will be tagged with ${TAG_ARRAY[*]}" +for tag in "${TAG_ARRAY[@]}"; do + TAG_ARGS+="--tag ${IMAGE}:${tag} " +done + +echo "$TAG_ARGS" + +# time \ +$ENGINE build \ + ${PODMAN_FLAGS} \ + --build-arg VCS_REF="${VCS_REF}" \ + --build-arg BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ') \ + --build-arg IMAGE_NAME="${IMAGE}" \ + --build-arg BINARY="${BINARY}" \ + --build-arg BIN_FOLDER="${BIN_FOLDER}" \ + --build-arg DESCRIPTION="${DESCRIPTION}" \ + ${TAG_ARGS} \ + -f "${DOCKERFILE}" \ + ${CONTEXT} + +echo "Your Container image for ${IMAGE} is ready" +$ENGINE images + +if [[ -z "${SKIP_IMAGE_VALIDATION}" ]]; then + echo "Check the image ${IMAGE}:${TAG_ARRAY[0]}" + $ENGINE run --rm -i "${IMAGE}:${TAG_ARRAY[0]}" --version + + echo "Query binaries" + $ENGINE run --rm -i --entrypoint /bin/bash "${IMAGE}:${TAG_ARRAY[0]}" -c 'echo BINARY: $BINARY' +fi diff --git a/scripts/ci/dockerfiles/collator_injected.Dockerfile b/scripts/ci/dockerfiles/collator_injected.Dockerfile deleted file mode 100644 index 91b8cb0057bf..000000000000 --- a/scripts/ci/dockerfiles/collator_injected.Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -# this file copies from scripts/ci/dockerfiles/Dockerfile and changes only the binary name -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="Injected adder-collator Docker image" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/collator_injected.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user and link ~/.local/share/adder-collator to /data - useradd -m -u 1000 -U -s /bin/sh -d /adder-collator adder-collator && \ - mkdir -p /data /adder-collator/.local/share && \ - chown -R adder-collator:adder-collator /data && \ - ln -s /data /adder-collator/.local/share/polkadot - -# add adder-collator binary to docker image -COPY ./adder-collator /usr/local/bin -COPY ./undying-collator /usr/local/bin - -USER adder-collator - -# check if executable works in this container -RUN /usr/local/bin/adder-collator --version -RUN /usr/local/bin/undying-collator --version - -EXPOSE 30333 9933 9944 -VOLUME ["/adder-collator"] - -ENTRYPOINT ["/usr/local/bin/adder-collator"] diff --git a/scripts/ci/dockerfiles/entrypoint.sh b/scripts/ci/dockerfiles/entrypoint.sh new file mode 100755 index 000000000000..eaa815faf6a4 --- /dev/null +++ b/scripts/ci/dockerfiles/entrypoint.sh @@ -0,0 +1,18 @@ 
+#!/usr/bin/env bash + +# Sanity check +if [ -z "$BINARY" ] +then + echo "BINARY ENV not defined, this should never be the case. Aborting..." + exit 1 +fi + +# If the user built the image with multiple binaries, +# we consider the first one to be the canonical one +# To start with another binary, the user can either: +# - use the --entrypoint option +# - pass the ENV BINARY with a single binary +IFS=',' read -r -a BINARIES <<< "$BINARY" +BIN0=${BINARIES[0]} +echo "Starting binary $BIN0" +$BIN0 $@ diff --git a/scripts/ci/dockerfiles/malus/build-injected.sh b/scripts/ci/dockerfiles/malus/build-injected.sh new file mode 100755 index 000000000000..99bd5fde1d5a --- /dev/null +++ b/scripts/ci/dockerfiles/malus/build-injected.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=malus,polkadot-execute-worker,polkadot-prepare-worker +export BIN_FOLDER=$1 +# export TAGS=... + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/malus/test-build.sh b/scripts/ci/dockerfiles/malus/test-build.sh new file mode 100755 index 000000000000..3114e9e2adf1 --- /dev/null +++ b/scripts/ci/dockerfiles/malus/test-build.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +export TAGS=latest,beta,7777,1.0.2-rc23 + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + paritypr/malus:7217 -c \ + 'cp "$(which malus)" /export' + +echo "Checking binaries we got:" +ls -al $TMP + +./build-injected.sh $TMP diff --git a/scripts/ci/dockerfiles/malus_injected.Dockerfile b/scripts/ci/dockerfiles/malus_injected.Dockerfile deleted file mode 100644 index fa429b5f142a..000000000000 --- a/scripts/ci/dockerfiles/malus_injected.Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -FROM debian:bullseye-slim - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="Malus - the nemesis of polkadot" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/malus.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - ca-certificates \ - curl \ - libssl1.1 \ - tini && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user - groupadd --gid 10000 nonroot && \ - useradd --home-dir /home/nonroot \ - --create-home \ - --shell /bin/bash \ - --gid nonroot \ - --groups nonroot \ - --uid 10000 nonroot - - -# add malus binary to docker image -COPY ./malus ./polkadot-execute-worker ./polkadot-prepare-worker /usr/local/bin - -USER nonroot - -# check if executable works in this container -RUN /usr/local/bin/malus --version - -# Tini allows us to avoid several Docker edge cases, see https://github.com/krallin/tini. 
-ENTRYPOINT ["tini", "--", "/bin/bash"] diff --git a/scripts/ci/dockerfiles/polkadot/README.md b/scripts/ci/dockerfiles/polkadot/README.md index 9ddf324bb29c..e331d8984c2c 100644 --- a/scripts/ci/dockerfiles/polkadot/README.md +++ b/scripts/ci/dockerfiles/polkadot/README.md @@ -1,7 +1,9 @@ # Self built Docker image The Polkadot repo contains several options to build Docker images for Polkadot. + This folder contains a self-contained image that does not require a Linux pre-built binary. + Instead, building the image is possible on any host having docker installed and will build Polkadot inside Docker. That also means that no Rust toolchain is required on the host machine for the build to succeed. diff --git a/scripts/ci/dockerfiles/polkadot/build-injected.sh b/scripts/ci/dockerfiles/polkadot/build-injected.sh new file mode 100755 index 000000000000..22774c7b7122 --- /dev/null +++ b/scripts/ci/dockerfiles/polkadot/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_binary +# This script replace the former dedicated Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=polkadot,polkadot-execute-worker,polkadot-prepare-worker +export BIN_FOLDER=$1 + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/polkadot/build.sh b/scripts/ci/dockerfiles/polkadot/build.sh deleted file mode 100755 index d00c9108bd8c..000000000000 --- a/scripts/ci/dockerfiles/polkadot/build.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -set -e - -pushd . - -# The following line ensure we run from the project root -PROJECT_ROOT=`git rev-parse --show-toplevel` -cd $PROJECT_ROOT - -# Find the current version from Cargo.toml -VERSION=`grep "^version" ./cli/Cargo.toml | egrep -o "([0-9\.]+-?[0-9]+)"` -GITUSER=parity -GITREPO=polkadot - -# Build the image -echo "Building ${GITUSER}/${GITREPO}:latest docker image, hang on!" -time docker build \ - -f ./scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile \ - -t ${GITUSER}/${GITREPO}:latest \ - -t ${GITUSER}/${GITREPO}:v${VERSION} \ - . 
- -# Show the list of available images for this repo -echo "Your Docker image for $GITUSER/$GITREPO is ready" -docker images | grep ${GITREPO} - -popd diff --git a/scripts/ci/dockerfiles/polkadot/docker-compose.yml b/scripts/ci/dockerfiles/polkadot/docker-compose.yml index 978191af88c1..524b1164796a 100644 --- a/scripts/ci/dockerfiles/polkadot/docker-compose.yml +++ b/scripts/ci/dockerfiles/polkadot/docker-compose.yml @@ -1,23 +1,22 @@ version: '3' services: polkadot: + image: parity/polkadot:latest + ports: - "127.0.0.1:30333:30333/tcp" - "127.0.0.1:9933:9933/tcp" - image: parity/polkadot:latest + - "127.0.0.1:9944:9944/tcp" + - "127.0.0.1:9615:9615/tcp" + volumes: - "polkadot-data:/data" + command: | --unsafe-rpc-external --unsafe-ws-external --rpc-cors all --prometheus-external - ports: - - "30333:30333" - - "9933:9933" - - "9944:9944" - - "9615:9615" - volumes: polkadot-data: diff --git a/scripts/ci/dockerfiles/polkadot_Dockerfile.README.md b/scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md similarity index 100% rename from scripts/ci/dockerfiles/polkadot_Dockerfile.README.md rename to scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md diff --git a/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile b/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile index 6e31298432f7..f263c836bbfe 100644 --- a/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile +++ b/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile @@ -7,7 +7,7 @@ COPY . /polkadot RUN cargo build --locked --release # This is the 2nd stage: a very small image where we copy the Polkadot binary." -FROM docker.io/library/ubuntu:20.04 +FROM docker.io/parity/base-bin:latest LABEL description="Multistage Docker image for Polkadot: a platform for web3" \ io.parity.image.type="builder" \ diff --git a/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile b/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile similarity index 95% rename from scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile rename to scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile index 74b5c7f48f88..e2c72dcfe2e9 100644 --- a/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile +++ b/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile @@ -11,7 +11,7 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ io.parity.image.title="parity/polkadot" \ io.parity.image.description="Polkadot: a platform for web3. This is the official Parity image with an injected binary." 
\ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_release.Dockerfile" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile" \ io.parity.image.revision="${VCS_REF}" \ io.parity.image.created="${BUILD_DATE}" \ io.parity.image.documentation="https://github.com/paritytech/polkadot/" diff --git a/scripts/ci/dockerfiles/polkadot/test-build.sh b/scripts/ci/dockerfiles/polkadot/test-build.sh new file mode 100755 index 000000000000..d2d904561cb5 --- /dev/null +++ b/scripts/ci/dockerfiles/polkadot/test-build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# You need to build an injected image first + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/polkadot -c \ + 'cp "$(which polkadot)" /export' + +echo "Checking binaries we got:" +tree $TMP + +./build-injected.sh $TMP diff --git a/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile b/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile deleted file mode 100644 index aebbbdcf1b7f..000000000000 --- a/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="Polkadot: a platform for web3" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user and link ~/.local/share/polkadot to /data - useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ - mkdir -p /data /polkadot/.local/share && \ - chown -R polkadot:polkadot /data && \ - ln -s /data /polkadot/.local/share/polkadot - -# add polkadot binary to docker image -COPY ./polkadot ./polkadot-execute-worker ./polkadot-prepare-worker /usr/local/bin - -USER polkadot - -# check if executable works in this container -RUN /usr/local/bin/polkadot --version -RUN /usr/local/bin/polkadot-execute-worker --version -RUN /usr/local/bin/polkadot-prepare-worker --version - -EXPOSE 30333 9933 9944 -VOLUME ["/polkadot"] - -ENTRYPOINT ["/usr/local/bin/polkadot"] diff --git a/scripts/ci/dockerfiles/staking-miner/README.md b/scripts/ci/dockerfiles/staking-miner/README.md new file mode 100644 index 000000000000..3610e1130316 --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/README.md @@ -0,0 +1,37 @@ +# staking-miner container image + +## Build using the Builder + +``` +./build.sh +``` + +## Build the injected Image + +You first need a valid Linux binary to inject. Let's assume this binary is located in `BIN_FOLDER`. + +``` +./build-injected.sh "$BIN_FOLDER" +``` + +## Test + +Here is how to test the image. 
We can generate a valid seed but the staking-miner will quickly notice that our +account is not funded and "does not exist". + +You may pass any ENV supported by the binary and must provide at least a few such as `SEED` and `URI`: +``` +ENV SEED="" +ENV URI="wss://rpc.polkadot.io:443" +ENV RUST_LOG="info" +``` + +``` +export SEED=$(subkey generate -n polkadot --output-type json | jq -r .secretSeed) +podman run --rm -it \ + -e URI="wss://rpc.polkadot.io:443" \ + -e RUST_LOG="info" \ + -e SEED \ + localhost/parity/staking-miner \ + dry-run seq-phragmen +``` diff --git a/scripts/ci/dockerfiles/staking-miner/build-injected.sh b/scripts/ci/dockerfiles/staking-miner/build-injected.sh new file mode 100755 index 000000000000..536636df6a91 --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/build-injected.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_staking-miner_binary +# This script replace the former dedicated staking-miner "injected" Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` + +export BINARY=staking-miner +export BIN_FOLDER=$1 + +$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh diff --git a/scripts/ci/dockerfiles/staking-miner/build.sh b/scripts/ci/dockerfiles/staking-miner/build.sh new file mode 100755 index 000000000000..67c82afcd2ce --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/build.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Sample call: +# $0 /path/to/folder_with_staking-miner_binary +# This script replace the former dedicated staking-miner "injected" Dockerfile +# and shows how to use the generic binary_injected.dockerfile + +PROJECT_ROOT=`git rev-parse --show-toplevel` +ENGINE=podman + +echo "Building the staking-miner using the Builder image" +echo "PROJECT_ROOT=$PROJECT_ROOT" +$ENGINE build -t staking-miner -f staking-miner_builder.Dockerfile "$PROJECT_ROOT" diff --git a/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile b/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile index a1932095fd4c..0ae77f36c79d 100644 --- a/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile +++ b/scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile @@ -4,17 +4,17 @@ FROM paritytech/ci-linux:production as builder ARG VCS_REF ARG BUILD_DATE ARG IMAGE_NAME="staking-miner" -ARG PROFILE=release +ARG PROFILE=production LABEL description="This is the build stage. Here we create the binary." WORKDIR /app COPY . /app -RUN cargo build --locked --$PROFILE --package staking-miner +RUN cargo build --locked --profile $PROFILE --package staking-miner # ===== SECOND STAGE ====== -FROM docker.io/library/ubuntu:20.04 +FROM docker.io/parity/base-bin:latest LABEL description="This is the 2nd stage: a very small image where we copy the binary." 
LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ @@ -28,13 +28,10 @@ LABEL io.parity.image.authors="devops-team@parity.io" \ ARG PROFILE=release COPY --from=builder /app/target/$PROFILE/staking-miner /usr/local/bin -RUN useradd -u 1000 -U -s /bin/sh miner && \ - rm -rf /usr/bin /usr/sbin - # show backtraces ENV RUST_BACKTRACE 1 -USER miner +USER parity ENV SEED="" ENV URI="wss://rpc.polkadot.io" diff --git a/scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile b/scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile deleted file mode 100644 index 4901ab4a3736..000000000000 --- a/scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -FROM docker.io/library/ubuntu:20.04 - -# metadata -ARG VCS_REF -ARG BUILD_DATE -ARG IMAGE_NAME="staking-miner" - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="${IMAGE_NAME}" \ - io.parity.image.description="${IMAGE_NAME} for substrate based chains" \ - io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_injected.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/polkadot/" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ - useradd -u 1000 -U -s /bin/sh miner - -# add binary to docker image -COPY ./staking-miner /usr/local/bin - -USER miner - -ENV SEED="" -ENV URI="wss://rpc.polkadot.io" -ENV RUST_LOG="info" - -# check if the binary works in this container -RUN /usr/local/bin/staking-miner --version - -ENTRYPOINT [ "/usr/local/bin/staking-miner" ] diff --git a/scripts/ci/dockerfiles/staking-miner/test-build.sh b/scripts/ci/dockerfiles/staking-miner/test-build.sh new file mode 100755 index 000000000000..0ce74e2df296 --- /dev/null +++ b/scripts/ci/dockerfiles/staking-miner/test-build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +# You need to build an injected image first + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/staking-miner -c \ + 'cp "$(which staking-miner)" /export' + +echo "Checking binaries we got:" +tree $TMP + +./build-injected.sh $TMP diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index dafca393cd4f..845ac7970108 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -39,7 +39,6 @@ build-linux-stable: - echo -n ${CI_JOB_ID} > ./artifacts/BUILD_LINUX_JOB_ID - RELEASE_VERSION=$(./artifacts/polkadot -V | awk '{print $2}'| awk -F "-" '{print $1}') - echo -n "v${RELEASE_VERSION}" > ./artifacts/BUILD_RELEASE_VERSION - - cp -r scripts/* ./artifacts build-test-collators: stage: build @@ -64,7 +63,6 @@ build-test-collators: - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - echo "adder-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - echo "undying-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat 
./artifacts/EXTRATAG))" - - cp -r ./scripts/* ./artifacts build-malus: stage: build @@ -88,7 +86,6 @@ build-malus: - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - echo "polkadot-test-malus = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - cp -r ./scripts/* ./artifacts build-staking-miner: stage: build @@ -110,7 +107,6 @@ build-staking-miner: - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - echo "staking-miner = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - cp -r ./scripts/* ./artifacts build-rustdoc: stage: build diff --git a/scripts/ci/gitlab/pipeline/publish.yml b/scripts/ci/gitlab/pipeline/publish.yml index d9a0dff95767..c224094125e3 100644 --- a/scripts/ci/gitlab/pipeline/publish.yml +++ b/scripts/ci/gitlab/pipeline/publish.yml @@ -19,20 +19,16 @@ publish-polkadot-debug-image: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 variables: - CI_IMAGE: ${BUILDAH_IMAGE} - GIT_STRATEGY: none - DOCKER_USER: ${PARITYPR_USER} - DOCKER_PASS: ${PARITYPR_PASS} - # scripts/ci/dockerfiles/polkadot_injected_debug.Dockerfile - DOCKERFILE: ci/dockerfiles/polkadot_injected_debug.Dockerfile - IMAGE_NAME: docker.io/paritypr/polkadot-debug + IMAGE_NAME: "polkadot-debug" + BINARY: "polkadot,polkadot-execute-worker,polkadot-prepare-worker" needs: - job: build-linux-stable artifacts: true after_script: + - !reference [.build-push-image, after_script] # pass artifacts to the zombienet-tests job # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance - - echo "PARACHAINS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/parachains.env + - echo "PARACHAINS_IMAGE_NAME=${IMAGE}" > ./artifacts/parachains.env - echo "PARACHAINS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/parachains.env artifacts: reports: @@ -48,20 +44,15 @@ publish-test-collators-image: - .build-push-image - .zombienet-refs variables: - CI_IMAGE: ${BUILDAH_IMAGE} - GIT_STRATEGY: none - DOCKER_USER: ${PARITYPR_USER} - DOCKER_PASS: ${PARITYPR_PASS} - # scripts/ci/dockerfiles/collator_injected.Dockerfile - DOCKERFILE: ci/dockerfiles/collator_injected.Dockerfile - IMAGE_NAME: docker.io/paritypr/colander + IMAGE_NAME: "colander" + BINARY: "adder-collator,undying-collator" needs: - job: build-test-collators artifacts: true after_script: - - buildah logout --all + - !reference [.build-push-image, after_script] # pass artifacts to the zombienet-tests job - - echo "COLLATOR_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/collator.env + - echo "COLLATOR_IMAGE_NAME=${IMAGE}" > ./artifacts/collator.env - echo "COLLATOR_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/collator.env artifacts: reports: @@ -76,20 +67,15 @@ publish-malus-image: - .build-push-image - .zombienet-refs variables: - CI_IMAGE: ${BUILDAH_IMAGE} - GIT_STRATEGY: none - DOCKER_USER: ${PARITYPR_USER} - DOCKER_PASS: ${PARITYPR_PASS} - # scripts/ci/dockerfiles/malus_injected.Dockerfile - DOCKERFILE: ci/dockerfiles/malus_injected.Dockerfile - IMAGE_NAME: docker.io/paritypr/malus + IMAGE_NAME: "malus" + BINARY: "malus,polkadot-execute-worker,polkadot-prepare-worker" needs: - job: build-malus artifacts: true after_script: - - buildah logout "$IMAGE_NAME" + - !reference [.build-push-image, after_script] # pass artifacts to the zombienet-tests job - - echo 
"MALUS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/malus.env + - echo "MALUS_IMAGE_NAME=${IMAGE}" > ./artifacts/malus.env - echo "MALUS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/malus.env artifacts: reports: @@ -103,13 +89,11 @@ publish-staking-miner-image: - .build-push-image - .publish-refs variables: - CI_IMAGE: ${BUILDAH_IMAGE} - # scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile - DOCKERFILE: ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile - IMAGE_NAME: docker.io/paritytech/staking-miner - GIT_STRATEGY: none - DOCKER_USER: ${Docker_Hub_User_Parity} - DOCKER_PASS: ${Docker_Hub_Pass_Parity} + IMAGE_NAME: "staking-miner" + BINARY: "staking-miner" + DOCKER_OWNER: "paritytech" + DOCKER_USER: "${Docker_Hub_User_Parity}" + DOCKER_PASS: "${Docker_Hub_Pass_Parity}" needs: - job: build-staking-miner artifacts: true @@ -122,11 +106,11 @@ publish-polkadot-image-description: DOCKER_PASSWORD: ${Docker_Hub_Pass_Parity} DOCKERHUB_REPOSITORY: parity/polkadot SHORT_DESCRIPTION: "Polkadot Official Docker Image" - README_FILEPATH: $CI_PROJECT_DIR/scripts/ci/dockerfiles/polkadot_Dockerfile.README.md + README_FILEPATH: $CI_PROJECT_DIR/scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md rules: - if: $CI_COMMIT_REF_NAME == "master" changes: - - scripts/ci/dockerfiles/polkadot_Dockerfile.README.md + - scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md - if: $CI_PIPELINE_SOURCE == "schedule" when: never script: diff --git a/utils/staking-miner/README.md b/utils/staking-miner/README.md index 4148677ee7ca..b7f70de573b0 100644 --- a/utils/staking-miner/README.md +++ b/utils/staking-miner/README.md @@ -28,8 +28,9 @@ There are 2 options to build a staking-miner Docker image: ### Building the injected image First build the binary as documented [above](#building). -You may then inject the binary into a Docker base image from the root of the Polkadot repository: +You may then inject the binary into a Docker base image: `parity/base-bin` (running the command from the root of the Polkadot repository): ``` +TODO: UPDATE THAT docker build -t staking-miner -f scripts/ci/dockerfiles/staking-miner/staking-miner_injected.Dockerfile target/release ``` @@ -39,6 +40,7 @@ Unlike the injected image that requires a Linux pre-built binary, this option do The trade-off however is that it takes a little longer to build and this option is less ideal for CI tasks. You may build the multi-stage image the root of the Polkadot repository with: ``` +TODO: UPDATE THAT docker build -t staking-miner -f scripts/ci/dockerfiles/staking-miner/staking-miner_builder.Dockerfile . ``` @@ -51,7 +53,7 @@ While it won't prevent a malicious actor to read your `SEED` if they gain access # The following line starts with an extra space on purpose: SEED=0x1234... 
-docker run --rm -it \ +docker run --rm -i \ --name staking-miner \ --read-only \ -e RUST_LOG=info \ From 8f05479e4bd61341af69f0721e617f01cbad8bb2 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 11 Aug 2023 18:30:58 +0300 Subject: [PATCH 06/27] companion for 14754: cli: move no-beefy flag to sc-cli (#7600) * cli: move no-beefy flag to substrate sc-cli config * bump substrate ref --------- Signed-off-by: Adrian Catangiu --- Cargo.lock | 368 +++++++++--------- cli/src/cli.rs | 5 - cli/src/command.rs | 11 +- node/service/src/lib.rs | 3 +- node/test/service/src/lib.rs | 2 +- .../adder/collator/src/main.rs | 4 +- .../undying/collator/src/main.rs | 4 +- 7 files changed, 195 insertions(+), 202 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ad880641445..862063ac72e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -617,7 +617,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "log", @@ -2446,7 +2446,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", ] @@ -2469,7 +2469,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-support-procedural", @@ -2494,7 +2494,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "array-bytes", @@ -2542,7 +2542,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies 
= [ "frame-support", "frame-system", @@ -2599,7 +2599,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-recursion", "futures", @@ -2620,7 +2620,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "aquamarine", "bitflags", @@ -2657,7 +2657,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "cfg-expr", @@ -2675,7 +2675,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2687,7 +2687,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro2", "quote", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2724,7 +2724,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -2737,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "cfg-if", "frame-support", @@ -2756,7 +2756,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -2771,7 +2771,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sp-api", @@ -2780,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "parity-scale-codec", @@ -2962,7 +2962,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "chrono", "frame-election-provider-support", @@ -4829,7 +4829,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "log", @@ -4848,7 +4848,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "anyhow", "jsonrpsee", @@ -5374,7 +5374,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5405,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5419,7 +5419,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5443,7 +5443,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" 
dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5463,7 +5463,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5482,7 +5482,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5497,7 +5497,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5516,7 +5516,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5540,7 +5540,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5558,7 +5558,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5577,7 +5577,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5594,7 +5594,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5611,7 +5611,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5629,7 @@ dependencies = [ 
[[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5652,7 +5652,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5665,7 +5665,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5684,7 +5684,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "docify", "frame-benchmarking", @@ -5703,7 +5703,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5742,7 +5742,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5762,7 +5762,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5779,7 +5779,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5796,7 +5796,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5815,7 +5815,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5832,7 +5832,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5848,7 +5848,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5864,7 +5864,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5883,7 +5883,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5903,7 +5903,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5914,7 +5914,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -5931,7 +5931,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5955,7 +5955,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5972,7 +5972,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5987,7 +5987,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6005,7 +6005,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6020,7 +6020,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6039,7 +6039,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6056,7 +6056,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -6077,7 +6077,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6093,7 +6093,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6111,7 +6111,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6134,7 +6134,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6145,7 +6145,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "sp-arithmetic", @@ -6154,7 +6154,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sp-api", @@ -6163,7 +6163,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6180,7 +6180,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6195,7 +6195,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6213,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6232,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-support", "frame-system", @@ -6248,7 +6248,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ 
"jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6264,7 +6264,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6276,7 +6276,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6293,7 +6293,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6308,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6324,7 +6324,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -6339,7 +6339,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-benchmarking", "frame-support", @@ -9360,7 +9360,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "sp-core", @@ -9371,7 +9371,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -9399,7 +9399,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "futures-timer", @@ -9422,7 +9422,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source 
= "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9437,7 +9437,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9456,7 +9456,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9467,7 +9467,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "chrono", @@ -9506,7 +9506,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "fnv", "futures", @@ -9532,7 +9532,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "kvdb", @@ -9558,7 +9558,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -9583,7 +9583,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "fork-tree", @@ -9619,7 +9619,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "jsonrpsee", @@ -9641,7 +9641,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ 
-9675,7 +9675,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "jsonrpsee", @@ -9694,7 +9694,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9707,7 +9707,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9748,7 +9748,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "finality-grandpa", "futures", @@ -9768,7 +9768,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -9791,7 +9791,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9813,7 +9813,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9825,7 +9825,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "anyhow", "cfg-if", @@ -9842,7 +9842,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ansi_term", "futures", @@ -9858,7 +9858,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" 
+source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9872,7 +9872,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ -9915,7 +9915,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-channel", "cid", @@ -9935,7 +9935,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "bitflags", @@ -9952,7 +9952,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ahash 0.8.2", "futures", @@ -9971,7 +9971,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ -9992,7 +9992,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-channel", @@ -10026,7 +10026,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "futures", @@ -10044,7 +10044,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "bytes", @@ -10078,7 +10078,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10087,7 +10087,7 @@ dependencies = [ [[package]] name = "sc-rpc" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "jsonrpsee", @@ -10118,7 +10118,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10137,7 +10137,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "http", "jsonrpsee", @@ -10152,7 +10152,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "futures", @@ -10179,7 +10179,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "directories", @@ -10243,7 +10243,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "log", "parity-scale-codec", @@ -10254,7 +10254,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "clap 4.2.5", "fs4", @@ -10268,7 +10268,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10287,7 +10287,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "libc", @@ -10306,7 +10306,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "chrono", "futures", 
@@ -10325,7 +10325,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ansi_term", "atty", @@ -10354,7 +10354,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10365,7 +10365,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -10391,7 +10391,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -10407,7 +10407,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-channel", "futures", @@ -10955,7 +10955,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "log", @@ -10976,7 +10976,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "blake2", @@ -10990,7 +10990,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11003,7 +11003,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "integer-sqrt", "num-traits", @@ -11017,7 +11017,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11030,7 +11030,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sp-api", "sp-inherents", @@ -11041,7 +11041,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "log", @@ -11059,7 +11059,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "futures", @@ -11074,7 +11074,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11091,7 +11091,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11110,7 +11110,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11129,7 +11129,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "finality-grandpa", "log", @@ -11147,7 +11147,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11159,7 +11159,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11206,7 +11206,7 @@ dependencies = [ [[package]] name = 
"sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "blake2b_simd", "byteorder", @@ -11219,7 +11219,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "quote", "sp-core-hashing", @@ -11229,7 +11229,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11238,7 +11238,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro2", "quote", @@ -11248,7 +11248,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "environmental", "parity-scale-codec", @@ -11259,7 +11259,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "serde_json", "sp-api", @@ -11270,7 +11270,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11284,7 +11284,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "bytes", "ed25519", @@ -11309,7 +11309,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "lazy_static", "sp-core", @@ -11320,7 +11320,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ 
"parity-scale-codec", "parking_lot 0.12.1", @@ -11332,7 +11332,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11341,7 +11341,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11352,7 +11352,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11370,7 +11370,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11384,7 +11384,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sp-api", "sp-core", @@ -11394,7 +11394,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "backtrace", "lazy_static", @@ -11404,7 +11404,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "rustc-hash", "serde", @@ -11414,7 +11414,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "either", "hash256-std-hasher", @@ -11436,7 +11436,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11454,7 +11454,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "Inflector", "proc-macro-crate", @@ -11466,7 +11466,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11481,7 +11481,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11495,7 +11495,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hash-db", "log", @@ -11516,7 +11516,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11540,12 +11540,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11558,7 +11558,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11571,7 +11571,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "sp-std", @@ -11583,7 +11583,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "sp-api", "sp-runtime", @@ -11592,7 +11592,7 @@ dependencies = [ [[package]] name = 
"sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "parity-scale-codec", @@ -11607,7 +11607,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11630,7 +11630,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11647,7 +11647,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11658,7 +11658,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11671,7 +11671,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "parity-scale-codec", "scale-info", @@ -11896,12 +11896,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11920,7 +11920,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "hyper", "log", @@ -11932,7 +11932,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" 
dependencies = [ "async-trait", "jsonrpsee", @@ -11945,7 +11945,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11962,7 +11962,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "array-bytes", "async-trait", @@ -11988,7 +11988,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11998,7 +11998,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12009,7 +12009,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "ansi_term", "build-helper", @@ -12879,7 +12879,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#57d2e53b6afefc079cec3b507710970ccd4a5aae" +source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" dependencies = [ "async-trait", "clap 4.2.5", diff --git a/cli/src/cli.rs b/cli/src/cli.rs index b7d884750762..e78213cf11c8 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -99,11 +99,6 @@ pub struct RunCmd { #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, - /// Disable the BEEFY gadget - /// (currently enabled by default on Rococo, Wococo and Versi). - #[arg(long)] - pub no_beefy: bool, - /// Add the destination address to the jaeger agent. /// /// Must be valid socket address, of format `IP:Port` diff --git a/cli/src/command.rs b/cli/src/command.rs index ee71bb0840dc..c8e8673c6d70 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -235,15 +235,11 @@ fn run_node_inner( where F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), { - let runner = cli + let mut runner = cli .create_runner_with_logger_hook::(&cli.run.base, logger_hook) .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; - // By default, enable BEEFY on test networks. 
- let enable_beefy = (chain_spec.is_rococo() || chain_spec.is_wococo() || chain_spec.is_versi()) && - !cli.run.no_beefy; - set_default_ss58_version(chain_spec); let grandpa_pause = if cli.run.grandpa_pause.is_empty() { @@ -259,6 +255,10 @@ where info!(" KUSAMA FOUNDATION "); info!("----------------------------"); } + // BEEFY allowed only on test networks. + if !(chain_spec.is_rococo() || chain_spec.is_wococo() || chain_spec.is_versi()) { + runner.config_mut().disable_beefy = true; + } let jaeger_agent = if let Some(ref jaeger_agent) = cli.run.jaeger_agent { Some( @@ -289,7 +289,6 @@ where service::NewFullParams { is_collator: service::IsCollator::No, grandpa_pause, - enable_beefy, jaeger_agent, telemetry_worker_handle: None, node_version, diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 457b5488ea14..fa8cb8ec77f7 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -629,7 +629,6 @@ where pub struct NewFullParams { pub is_collator: IsCollator, pub grandpa_pause: Option<(u32, u32)>, - pub enable_beefy: bool, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, /// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version @@ -711,7 +710,6 @@ pub fn new_full( NewFullParams { is_collator, grandpa_pause, - enable_beefy, jaeger_agent, telemetry_worker_handle, node_version, @@ -746,6 +744,7 @@ pub fn new_full( Some(backoff) }; + let enable_beefy = !config.disable_beefy; // If not on a known test network, warn the user that BEEFY is still experimental. if enable_beefy && !config.chain_spec.is_rococo() && diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index 99ccacb78f7e..a2c1b1941003 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -81,7 +81,6 @@ pub fn new_full( polkadot_service::NewFullParams { is_collator, grandpa_pause: None, - enable_beefy: true, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, @@ -188,6 +187,7 @@ pub fn node_config( offchain_worker: Default::default(), force_authoring: false, disable_grandpa: false, + disable_beefy: false, dev_key_seed: Some(key_seed), tracing_targets: None, tracing_receiver: Default::default(), diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs index d4bfc50c8db7..8d8a13767178 100644 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -53,15 +53,15 @@ fn main() -> Result<()> { ) })?; - runner.run_node_until_exit(|config| async move { + runner.run_node_until_exit(|mut config| async move { let collator = Collator::new(); + config.disable_beefy = true; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), grandpa_pause: None, - enable_beefy: false, jaeger_agent: None, telemetry_worker_handle: None, diff --git a/parachain/test-parachains/undying/collator/src/main.rs b/parachain/test-parachains/undying/collator/src/main.rs index 3b6b4259aaec..da8205ba1893 100644 --- a/parachain/test-parachains/undying/collator/src/main.rs +++ b/parachain/test-parachains/undying/collator/src/main.rs @@ -53,15 +53,15 @@ fn main() -> Result<()> { ) })?; - runner.run_node_until_exit(|config| async move { + runner.run_node_until_exit(|mut config| async move { let collator = Collator::new(cli.run.pov_size, cli.run.pvf_complexity); + config.disable_beefy = true; let full_node = 
polkadot_service::build_full( config, polkadot_service::NewFullParams { is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), grandpa_pause: None, - enable_beefy: false, jaeger_agent: None, telemetry_worker_handle: None, From 9417f1656c7523890a88665a6e89eaeb4be090be Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Mon, 14 Aug 2023 06:48:15 -0300 Subject: [PATCH 07/27] pvf: use test-utils feature to export test only (#7538) * pvf: use test-utils feature to export test only * adding comment to test-utils feature * make prepare-worker and execute-worker as optional dependencies and add comments to test-utils * remove doc hidden from pvf testing * add prepare worker and execute worker entrypoints to test-utils feature * pvf: add sp_tracing as optional dependency of test-utils * add test-utils for polkadot and malus * add test-utils feature to prepare and execute workers script * remove required features from prepare and executing * Try to trigger CI again to fix broken jobs --------- Co-authored-by: Marcin S --- Cargo.lock | 3 +++ Cargo.toml | 3 +-- node/core/pvf/Cargo.toml | 13 ++++++++++--- node/core/pvf/common/Cargo.toml | 7 ++++++- node/core/pvf/common/src/lib.rs | 4 ++-- node/core/pvf/common/src/pvf.rs | 6 +++--- node/core/pvf/src/lib.rs | 6 ++++-- node/core/pvf/src/testing.rs | 1 - node/core/pvf/tests/it/worker_common.rs | 6 ++++-- node/malus/Cargo.toml | 2 +- parachain/test-parachains/adder/collator/Cargo.toml | 11 +++++++++-- .../test-parachains/undying/collator/Cargo.toml | 11 +++++++++-- 12 files changed, 52 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 862063ac72e4..85b0bf1cfa57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7238,6 +7238,7 @@ dependencies = [ "parity-scale-codec", "pin-project", "polkadot-core-primitives", + "polkadot-node-core-pvf", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", @@ -12253,6 +12254,7 @@ dependencies = [ "sp-keyring", "substrate-test-utils", "test-parachain-adder", + "test-parachain-adder-collator", "tokio", ] @@ -12301,6 +12303,7 @@ dependencies = [ "sp-keyring", "substrate-test-utils", "test-parachain-undying", + "test-parachain-undying-collator", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 0a6fc1b97891..44cf027e35b3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,7 @@ polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" polkadot-overseer = { path = "node/overseer" } # Needed for worker binaries. 
-polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } +polkadot-node-core-pvf-common = { path = "node/core/pvf/common", features = ["test-utils"] } polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } [dev-dependencies] @@ -227,7 +227,6 @@ fast-runtime = [ "polkadot-cli/fast-runtime" ] runtime-metrics = [ "polkadot-cli/runtime-metrics" ] pyroscope = ["polkadot-cli/pyroscope"] jemalloc-allocator = ["polkadot-node-core-pvf-prepare-worker/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"] - # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load ci-only-tests = ["polkadot-node-core-pvf/ci-only-tests"] diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index 02a56ed9d2df..b55df45b0203 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true [[bin]] name = "puppet_worker" path = "bin/puppet_worker.rs" +required-features = ["test-utils"] [dependencies] always-assert = "0.1" @@ -27,8 +28,6 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ polkadot-parachain = { path = "../../../parachain" } polkadot-core-primitives = { path = "../../../core-primitives" } polkadot-node-core-pvf-common = { path = "common" } -polkadot-node-core-pvf-execute-worker = { path = "execute-worker" } -polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-primitives = { path = "../../primitives" } polkadot-primitives = { path = "../../../primitives" } @@ -36,7 +35,9 @@ polkadot-primitives = { path = "../../../primitives" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-wasm-interface = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } +polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } +polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } [build-dependencies] substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -44,9 +45,15 @@ substrate-build-script-utils = { git = "https://github.com/paritytech/substrate" [dev-dependencies] assert_matches = "1.4.0" hex-literal = "0.3.4" +polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } +# For the puppet worker, depend on ourselves with the test-utils feature. +polkadot-node-core-pvf = { path = ".", features = ["test-utils"] } adder = { package = "test-parachain-adder", path = "../../../parachain/test-parachains/adder" } halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } [features] ci-only-tests = [] +# This feature is used to export test code to other crates without putting it in the production build. +# This is also used by the `puppet_worker` binary. 
+test-utils = ["polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-pvf-execute-worker", "sp-tracing"] diff --git a/node/core/pvf/common/Cargo.toml b/node/core/pvf/common/Cargo.toml index a091f8f75806..dfb490455b3d 100644 --- a/node/core/pvf/common/Cargo.toml +++ b/node/core/pvf/common/Cargo.toml @@ -25,7 +25,7 @@ sc-executor-wasmtime = { git = "https://github.com/paritytech/substrate", branch sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.2.0" @@ -33,3 +33,8 @@ landlock = "0.2.0" [dev-dependencies] assert_matches = "1.4.0" tempfile = "3.3.0" + +[features] +# This feature is used to export test code to other crates without putting it in the production build. +# Also used for building the puppet worker. +test-utils = ["sp-tracing"] diff --git a/node/core/pvf/common/src/lib.rs b/node/core/pvf/common/src/lib.rs index 7e0cab45b671..8ff9757a07a0 100644 --- a/node/core/pvf/common/src/lib.rs +++ b/node/core/pvf/common/src/lib.rs @@ -26,7 +26,7 @@ pub mod worker; pub use cpu_time::ProcessTime; // Used by `decl_worker_main!`. -#[doc(hidden)] +#[cfg(feature = "test-utils")] pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; @@ -34,7 +34,7 @@ const LOG_TARGET: &str = "parachain::pvf-common"; use std::mem; use tokio::io::{self, AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _}; -#[doc(hidden)] +#[cfg(feature = "test-utils")] pub mod tests { use std::time::Duration; diff --git a/node/core/pvf/common/src/pvf.rs b/node/core/pvf/common/src/pvf.rs index ab0007352d1d..e31264713a57 100644 --- a/node/core/pvf/common/src/pvf.rs +++ b/node/core/pvf/common/src/pvf.rs @@ -84,7 +84,7 @@ impl PvfPrepData { } /// Creates a structure for tests. - #[doc(hidden)] + #[cfg(feature = "test-utils")] pub fn from_discriminator_and_timeout(num: u32, timeout: Duration) -> Self { let descriminator_buf = num.to_le_bytes().to_vec(); Self::from_code( @@ -96,13 +96,13 @@ impl PvfPrepData { } /// Creates a structure for tests. - #[doc(hidden)] + #[cfg(feature = "test-utils")] pub fn from_discriminator(num: u32) -> Self { Self::from_discriminator_and_timeout(num, crate::tests::TEST_PREPARATION_TIMEOUT) } /// Creates a structure for tests. - #[doc(hidden)] + #[cfg(feature = "test-utils")] pub fn from_discriminator_precheck(num: u32) -> Self { let mut pvf = Self::from_discriminator_and_timeout(num, crate::tests::TEST_PREPARATION_TIMEOUT); diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index 2ed3f5242ded..eb6ab39ac500 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -97,11 +97,11 @@ mod prepare; mod priority; mod worker_intf; -#[doc(hidden)] +#[cfg(feature = "test-utils")] pub mod testing; // Used by `decl_puppet_worker_main!`. -#[doc(hidden)] +#[cfg(feature = "test-utils")] pub use sp_tracing; pub use error::{InvalidCandidate, ValidationError}; @@ -118,7 +118,9 @@ pub use polkadot_node_core_pvf_common::{ }; // Re-export worker entrypoints. 
+#[cfg(feature = "test-utils")] pub use polkadot_node_core_pvf_execute_worker::worker_entrypoint as execute_worker_entrypoint; +#[cfg(feature = "test-utils")] pub use polkadot_node_core_pvf_prepare_worker::worker_entrypoint as prepare_worker_entrypoint; /// The log target for this crate. diff --git a/node/core/pvf/src/testing.rs b/node/core/pvf/src/testing.rs index 3cd1ce304ab8..980a28c01566 100644 --- a/node/core/pvf/src/testing.rs +++ b/node/core/pvf/src/testing.rs @@ -19,7 +19,6 @@ //! N.B. This is not guarded with some feature flag. Overexposing items here may affect the final //! artifact even for production builds. -#[doc(hidden)] pub use crate::worker_intf::{spawn_with_program_path, SpawnErr}; use polkadot_primitives::ExecutorParams; diff --git a/node/core/pvf/tests/it/worker_common.rs b/node/core/pvf/tests/it/worker_common.rs index 439ac8538c95..a3bf552e894a 100644 --- a/node/core/pvf/tests/it/worker_common.rs +++ b/node/core/pvf/tests/it/worker_common.rs @@ -14,10 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::PUPPET_EXE; -use polkadot_node_core_pvf::testing::{spawn_with_program_path, SpawnErr}; use std::time::Duration; +use polkadot_node_core_pvf::testing::{spawn_with_program_path, SpawnErr}; + +use crate::PUPPET_EXE; + // Test spawning a program that immediately exits with a failure code. #[tokio::test] async fn spawn_immediate_exit() { diff --git a/node/malus/Cargo.toml b/node/malus/Cargo.toml index 08656ea9f3da..0c9988159516 100644 --- a/node/malus/Cargo.toml +++ b/node/malus/Cargo.toml @@ -48,7 +48,7 @@ erasure = { package = "polkadot-erasure-coding", path = "../../erasure-coding" } rand = "0.8.5" # Required for worker binaries to build. -polkadot-node-core-pvf-common = { path = "../core/pvf/common" } +polkadot-node-core-pvf-common = { path = "../core/pvf/common", features = ["test-utils"] } polkadot-node-core-pvf-execute-worker = { path = "../core/pvf/execute-worker" } polkadot-node-core-pvf-prepare-worker = { path = "../core/pvf/prepare-worker" } diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index fec95a5718a1..08dcbcaa644e 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -13,6 +13,7 @@ path = "src/main.rs" [[bin]] name = "adder_collator_puppet_worker" path = "bin/puppet_worker.rs" +required-features = ["test-utils"] [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -31,11 +32,10 @@ polkadot-node-subsystem = { path = "../../../../node/subsystem" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } - # This one is tricky. Even though it is not used directly by the collator, we still need it for the # `puppet_worker` binary, which is required for the integration test. However, this shouldn't be # a big problem since it is used transitively anyway. -polkadot-node-core-pvf = { path = "../../../../node/core/pvf" } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"], optional = true } [dev-dependencies] polkadot-parachain = { path = "../../.." 
} @@ -44,5 +44,12 @@ polkadot-test-service = { path = "../../../../node/test/service" } substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +# For the puppet worker, depend on ourselves with the test-utils feature. +test-parachain-adder-collator = { path = ".", features = ["test-utils"] } tokio = { version = "1.24.2", features = ["macros"] } + +[features] +# This feature is used to export test code to other crates without putting it in the production build. +# This is also used by the `puppet_worker` binary. +test-utils = ["polkadot-node-core-pvf/test-utils"] diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index 4f1a34f977c8..5b5656efb4ac 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -13,6 +13,7 @@ path = "src/main.rs" [[bin]] name = "undying_collator_puppet_worker" path = "bin/puppet_worker.rs" +required-features = ["test-utils"] [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -31,18 +32,24 @@ polkadot-node-subsystem = { path = "../../../../node/subsystem" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } - # This one is tricky. Even though it is not used directly by the collator, we still need it for the # `puppet_worker` binary, which is required for the integration test. However, this shouldn't be # a big problem since it is used transitively anyway. -polkadot-node-core-pvf = { path = "../../../../node/core/pvf" } +polkadot-node-core-pvf = { path = "../../../../node/core/pvf", features = ["test-utils"], optional = true } [dev-dependencies] polkadot-parachain = { path = "../../.." } polkadot-test-service = { path = "../../../../node/test/service" } +# For the puppet worker, depend on ourselves with the test-utils feature. +test-parachain-undying-collator = { path = ".", features = ["test-utils"] } substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } tokio = { version = "1.24.2", features = ["macros"] } + +[features] +# This feature is used to export test code to other crates without putting it in the production build. +# This is also used by the `puppet_worker` binary. 
+test-utils = ["polkadot-node-core-pvf/test-utils"] From 3999688e2cd10dcb48db987b9550049160f9e25d Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 14 Aug 2023 12:00:24 +0200 Subject: [PATCH 08/27] RC container image fixes (#7607) * Remove ENV for the artifacts folder --- .github/workflows/release-40_publish-rc-image.yml | 12 ++++++------ scripts/ci/common/lib.sh | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/release-40_publish-rc-image.yml b/.github/workflows/release-40_publish-rc-image.yml index a821eaa033fd..c46bf534b060 100644 --- a/.github/workflows/release-40_publish-rc-image.yml +++ b/.github/workflows/release-40_publish-rc-image.yml @@ -31,7 +31,6 @@ env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }} REPO: ${{ github.repository }} - ARTIFACT_FOLDER: release-artifacts jobs: fetch-artifacts: @@ -51,7 +50,7 @@ jobs: with: key: artifacts-${{ github.sha }} path: | - ${ARTIFACT_FOLDER}/**/* + ./release-artifacts/**/* build-container: runs-on: ubuntu-latest @@ -69,11 +68,12 @@ jobs: uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 with: key: artifacts-${{ github.sha }} + fail-on-cache-miss: true path: | - ${ARTIFACT_FOLDER}/**/* + ./release-artifacts/**/* - name: Check sha256 ${{ matrix.binary }} - working-directory: ${ARTIFACT_FOLDER} + working-directory: ./release-artifacts run: | . ../scripts/ci/common/lib.sh @@ -81,7 +81,7 @@ jobs: check_sha256 ${{ matrix.binary }} && echo "OK" || echo "ERR" - name: Check GPG ${{ matrix.binary }} - working-directory: ${ARTIFACT_FOLDER} + working-directory: ./release-artifacts run: | . ../scripts/ci/common/lib.sh import_gpg_keys @@ -102,7 +102,7 @@ jobs: - name: Build Injected Container image for ${{ matrix.binary }} env: - BIN_FOLDER: ${ARTIFACT_FOLDER} + BIN_FOLDER: ./release-artifacts BINARY: ${{ matrix.binary }} TAGS: ${{join(steps.fetch_refs.outputs.*, ',')}} run: | diff --git a/scripts/ci/common/lib.sh b/scripts/ci/common/lib.sh index 00abe9a1d8d4..a04dc2ef1da0 100755 --- a/scripts/ci/common/lib.sh +++ b/scripts/ci/common/lib.sh @@ -201,7 +201,6 @@ check_bootnode(){ fetch_release_artifacts() { echo "Release ID : $RELEASE_ID" echo "Repo : $REPO" - echo "ARTIFACT_FOLDER: $ARTIFACT_FOLDER" curl -L -s \ -H "Accept: application/vnd.github+json" \ @@ -214,8 +213,8 @@ fetch_release_artifacts() { count=$(jq '.assets|length' < release.json ) # Fetch artifacts - mkdir -p ${ARTIFACT_FOLDER} - pushd ${ARTIFACT_FOLDER} > /dev/null + mkdir -p "./release-artifacts" + pushd "./release-artifacts" > /dev/null iter=1 for id in "${ids[@]}" @@ -227,6 +226,7 @@ fetch_release_artifacts() { iter=$((iter + 1)) done + pwd ls -al --color popd > /dev/null } From 7da410e5068689c4864054d5fcce78ce312aef08 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 14 Aug 2023 13:11:11 +0200 Subject: [PATCH 09/27] Fix the user used to login to Docker hub (#7610) --- .github/workflows/release-40_publish-rc-image.yml | 2 +- scripts/ci/common/lib.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-40_publish-rc-image.yml b/.github/workflows/release-40_publish-rc-image.yml index c46bf534b060..3d91c5b8c682 100644 --- a/.github/workflows/release-40_publish-rc-image.yml +++ b/.github/workflows/release-40_publish-rc-image.yml @@ -112,7 +112,7 @@ jobs: - name: Login to Dockerhub uses: docker/login-action@v2 with: - username: ${{ inputs.owner }} + username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - 
name: Push Container image for ${{ matrix.binary }} diff --git a/scripts/ci/common/lib.sh b/scripts/ci/common/lib.sh index a04dc2ef1da0..e490ec22d5bf 100755 --- a/scripts/ci/common/lib.sh +++ b/scripts/ci/common/lib.sh @@ -206,7 +206,7 @@ fetch_release_artifacts() { -H "Accept: application/vnd.github+json" \ -H "Authorization: Bearer ${GITHUB_TOKEN}" \ -H "X-GitHub-Api-Version: 2022-11-28" \ - https://api.github.com/repos/${REPO}/releases/$RELEASE_ID > release.json + https://api.github.com/repos/${REPO}/releases/${RELEASE_ID} > release.json # Get Asset ids ids=($(jq -r '.assets[].id' < release.json )) From c46d7426445ba64cf3ef3158e50e1c7665345a3f Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 14 Aug 2023 16:52:52 +0300 Subject: [PATCH 10/27] Remove ParityDb migration tests (#7612) --- node/service/src/parachains_db/upgrade.rs | 119 ---------------------- 1 file changed, 119 deletions(-) diff --git a/node/service/src/parachains_db/upgrade.rs b/node/service/src/parachains_db/upgrade.rs index 6041a093ef9b..54ef97afd71c 100644 --- a/node/service/src/parachains_db/upgrade.rs +++ b/node/service/src/parachains_db/upgrade.rs @@ -278,17 +278,6 @@ pub(crate) fn paritydb_version_3_config(path: &Path) -> parity_db::Options { options } -/// Database configuration for version 0. This is useful just for testing. -#[cfg(test)] -pub(crate) fn paritydb_version_0_config(path: &Path) -> parity_db::Options { - let mut options = - parity_db::Options::with_columns(&path, super::columns::v1::NUM_COLUMNS as u8); - options.columns[super::columns::v3::COL_AVAILABILITY_META as usize].btree_index = true; - options.columns[super::columns::v3::COL_CHAIN_SELECTION_DATA as usize].btree_index = true; - - options -} - /// Migration from version 0 to version 1. /// Cases covered: /// - upgrading from v0.9.23 or earlier -> the `dispute coordinator column` was changed @@ -332,82 +321,6 @@ mod tests { *, }; - #[test] - fn test_paritydb_migrate_0_to_1() { - use parity_db::Db; - - let db_dir = tempfile::tempdir().unwrap(); - let path = db_dir.path(); - { - let db = Db::open_or_create(&paritydb_version_0_config(&path)).unwrap(); - - db.commit(vec![ - (COL_DISPUTE_COORDINATOR_DATA as u8, b"1234".to_vec(), Some(b"somevalue".to_vec())), - (COL_AVAILABILITY_META as u8, b"5678".to_vec(), Some(b"somevalue".to_vec())), - ]) - .unwrap(); - } - - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); - - let db = Db::open(&paritydb_version_1_config(&path)).unwrap(); - assert_eq!(db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), None); - assert_eq!( - db.get(COL_AVAILABILITY_META as u8, b"5678").unwrap(), - Some("somevalue".as_bytes().to_vec()) - ); - } - - #[test] - fn test_paritydb_migrate_1_to_2() { - use parity_db::Db; - - let db_dir = tempfile::tempdir().unwrap(); - let path = db_dir.path(); - - // We need to properly set db version for upgrade to work. 
- fs::write(version_file_path(path), "1").expect("Failed to write DB version"); - - { - let db = Db::open_or_create(&paritydb_version_1_config(&path)).unwrap(); - - // Write some dummy data - db.commit(vec![( - COL_DISPUTE_COORDINATOR_DATA as u8, - b"1234".to_vec(), - Some(b"somevalue".to_vec()), - )]) - .unwrap(); - - assert_eq!(db.num_columns(), columns::v1::NUM_COLUMNS as u8); - } - - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); - - let db = Db::open(&paritydb_version_2_config(&path)).unwrap(); - - assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); - - assert_eq!( - db.get(COL_DISPUTE_COORDINATOR_DATA as u8, b"1234").unwrap(), - Some("somevalue".as_bytes().to_vec()) - ); - - // Test we can write the new column. - db.commit(vec![( - COL_SESSION_WINDOW_DATA as u8, - b"1337".to_vec(), - Some(b"0xdeadb00b".to_vec()), - )]) - .unwrap(); - - // Read back data from new column. - assert_eq!( - db.get(COL_SESSION_WINDOW_DATA as u8, b"1337").unwrap(), - Some("0xdeadb00b".as_bytes().to_vec()) - ); - } - #[test] fn test_rocksdb_migrate_1_to_2() { use kvdb::{DBKey, DBOp}; @@ -467,38 +380,6 @@ mod tests { ); } - #[test] - fn test_paritydb_migrate_2_to_3() { - use parity_db::Db; - - let db_dir = tempfile::tempdir().unwrap(); - let path = db_dir.path(); - let test_key = b"1337"; - - // We need to properly set db version for upgrade to work. - fs::write(version_file_path(path), "2").expect("Failed to write DB version"); - - { - let db = Db::open_or_create(&paritydb_version_2_config(&path)).unwrap(); - - // Write some dummy data - db.commit(vec![( - COL_SESSION_WINDOW_DATA as u8, - test_key.to_vec(), - Some(b"0xdeadb00b".to_vec()), - )]) - .unwrap(); - - assert_eq!(db.num_columns(), columns::v2::NUM_COLUMNS as u8); - } - - try_upgrade_db(&path, DatabaseKind::ParityDB).unwrap(); - - let db = Db::open(&paritydb_version_3_config(&path)).unwrap(); - - assert_eq!(db.num_columns(), columns::v3::NUM_COLUMNS as u8); - } - #[test] fn test_rocksdb_migrate_2_to_3() { use kvdb_rocksdb::{Database, DatabaseConfig}; From 247e4a73ffd930dd8de415a1a8645da5531a373d Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 14 Aug 2023 16:29:29 +0200 Subject: [PATCH 11/27] Use same `fmt` and `clippy` configs as in Substrate (#7611) * Use same rustfmt.toml as Substrate Signed-off-by: Oliver Tale-Yazdi * format format file Signed-off-by: Oliver Tale-Yazdi * Format with new config Signed-off-by: Oliver Tale-Yazdi * Add Substrate Clippy config Signed-off-by: Oliver Tale-Yazdi * Print Clippy version in CI Otherwise its difficult to reproduce locally. 
Signed-off-by: Oliver Tale-Yazdi * Make fmt happy Signed-off-by: Oliver Tale-Yazdi * Update node/core/pvf/src/error.rs Co-authored-by: Tsvetomir Dimitrov * Update node/core/pvf/src/error.rs Co-authored-by: Tsvetomir Dimitrov --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Tsvetomir Dimitrov --- .cargo/config.toml | 1 + cli/src/cli.rs | 4 +- cli/src/command.rs | 4 +- core-primitives/src/lib.rs | 12 +- node/collation-generation/src/lib.rs | 22 ++-- node/collation-generation/src/tests.rs | 10 +- .../approval-voting/src/approval_checking.rs | 8 +- node/core/approval-voting/src/criteria.rs | 15 +-- node/core/approval-voting/src/import.rs | 28 ++--- node/core/approval-voting/src/lib.rs | 35 +++--- node/core/approval-voting/src/ops.rs | 7 +- node/core/av-store/src/lib.rs | 35 +++--- node/core/backing/src/lib.rs | 14 ++- node/core/backing/src/metrics.rs | 3 +- node/core/backing/src/tests.rs | 3 +- node/core/bitfield-signing/src/lib.rs | 4 +- node/core/candidate-validation/src/lib.rs | 33 +++--- node/core/chain-selection/src/lib.rs | 6 +- node/core/dispute-coordinator/src/db/v1.rs | 7 +- node/core/dispute-coordinator/src/import.rs | 8 +- .../dispute-coordinator/src/initialized.rs | 12 +- node/core/dispute-coordinator/src/lib.rs | 19 +-- .../src/participation/queues/mod.rs | 22 ++-- .../src/participation/queues/tests.rs | 4 +- .../src/participation/tests.rs | 3 +- .../src/scraping/candidates.rs | 6 +- .../dispute-coordinator/src/scraping/mod.rs | 15 +-- .../dispute-coordinator/src/scraping/tests.rs | 19 +-- node/core/dispute-coordinator/src/tests.rs | 62 ++++++---- node/core/parachains-inherent/src/lib.rs | 11 +- node/core/provisioner/src/disputes/mod.rs | 6 +- .../src/disputes/prioritized_selection/mod.rs | 45 ++++---- .../disputes/prioritized_selection/tests.rs | 19 +-- node/core/provisioner/src/error.rs | 3 +- node/core/provisioner/src/lib.rs | 33 +++--- node/core/provisioner/src/metrics.rs | 7 +- node/core/provisioner/src/tests.rs | 3 +- node/core/pvf-checker/src/lib.rs | 4 +- node/core/pvf-checker/src/tests.rs | 4 +- node/core/pvf/common/src/error.rs | 23 ++-- node/core/pvf/common/src/executor_intf.rs | 20 ++-- node/core/pvf/common/src/worker/mod.rs | 6 +- node/core/pvf/execute-worker/src/lib.rs | 3 +- node/core/pvf/prepare-worker/src/lib.rs | 17 +-- .../pvf/prepare-worker/src/memory_stats.rs | 4 +- node/core/pvf/src/artifacts.rs | 3 +- node/core/pvf/src/error.rs | 29 ++--- node/core/pvf/src/execute/queue.rs | 3 +- node/core/pvf/src/execute/worker_intf.rs | 8 +- node/core/pvf/src/host.rs | 19 +-- node/core/pvf/src/lib.rs | 30 ++--- node/core/pvf/src/metrics.rs | 3 +- node/core/pvf/src/prepare/pool.rs | 16 +-- node/core/pvf/src/prepare/queue.rs | 5 +- node/core/pvf/src/prepare/worker_intf.rs | 4 +- node/core/pvf/src/worker_intf.rs | 26 +++-- node/core/runtime-api/src/lib.rs | 11 +- node/core/runtime-api/src/tests.rs | 3 +- node/gum/src/lib.rs | 15 ++- node/jaeger/src/lib.rs | 3 +- node/jaeger/src/spans.rs | 4 +- node/malus/src/variants/common.rs | 22 ++-- .../src/variants/dispute_valid_candidates.rs | 11 +- .../src/variants/suggest_garbage_candidate.rs | 11 +- node/metrics/src/lib.rs | 3 +- node/network/approval-distribution/src/lib.rs | 36 +++--- .../src/requester/fetch_task/mod.rs | 3 +- .../src/requester/mod.rs | 17 ++- .../src/futures_undead.rs | 1 - node/network/availability-recovery/src/lib.rs | 49 ++++---- .../availability-recovery/src/tests.rs | 3 +- node/network/bridge/src/rx/mod.rs | 6 +- node/network/bridge/src/rx/tests.rs | 5 +- node/network/bridge/src/tx/mod.rs | 3 +- 
.../network/bridge/src/validator_discovery.rs | 7 +- .../src/collator_side/mod.rs | 15 +-- .../src/collator_side/tests.rs | 10 +- .../src/collator_side/validators_buffer.rs | 6 +- .../src/validator_side/tests.rs | 3 +- node/network/dispute-distribution/src/lib.rs | 8 +- .../src/receiver/batches/batch.rs | 4 +- .../src/receiver/batches/waiting_queue.rs | 4 +- .../dispute-distribution/src/receiver/mod.rs | 10 +- .../src/sender/send_task.rs | 11 +- node/network/gossip-support/src/lib.rs | 3 +- node/network/protocol/src/grid_topology.rs | 19 +-- node/network/protocol/src/lib.rs | 6 +- node/network/protocol/src/peer_set.rs | 3 +- .../src/request_response/incoming/mod.rs | 4 +- .../protocol/src/request_response/mod.rs | 6 +- .../network/statement-distribution/src/lib.rs | 49 ++++---- .../statement-distribution/src/tests.rs | 8 +- node/overseer/src/lib.rs | 34 +++--- node/primitives/src/disputes/message.rs | 4 +- node/primitives/src/disputes/status.rs | 11 +- node/primitives/src/lib.rs | 26 +++-- node/service/src/chain_spec.rs | 11 +- node/service/src/fake_runtime_api.rs | 3 +- node/service/src/lib.rs | 14 ++- node/service/src/relay_chain_selection.rs | 11 +- node/service/src/tests.rs | 26 ++--- node/subsystem-test-helpers/src/lib.rs | 3 +- node/subsystem-types/src/lib.rs | 4 +- node/subsystem-types/src/messages.rs | 109 ++++++++++-------- node/subsystem-types/src/runtime_client.rs | 7 +- node/subsystem-util/src/lib.rs | 9 +- node/subsystem-util/src/nesting_sender.rs | 21 ++-- node/subsystem-util/src/reputation.rs | 3 +- node/test/client/src/block_builder.rs | 19 +-- node/test/service/src/lib.rs | 12 +- parachain/src/primitives.rs | 17 +-- .../test-parachains/adder/collator/src/lib.rs | 8 +- .../adder/collator/tests/integration.rs | 3 +- .../undying/collator/src/lib.rs | 8 +- .../undying/collator/tests/integration.rs | 3 +- primitives/src/runtime_api.rs | 21 ++-- primitives/src/v5/metrics.rs | 10 +- primitives/src/v5/mod.rs | 79 +++++++------ primitives/test-helpers/src/lib.rs | 3 +- runtime/common/slot_range_helper/src/lib.rs | 8 +- runtime/common/src/assigned_slots.rs | 9 +- runtime/common/src/auctions.rs | 42 +++---- runtime/common/src/claims.rs | 21 ++-- runtime/common/src/crowdloan/migration.rs | 4 +- runtime/common/src/crowdloan/mod.rs | 67 ++++++----- runtime/common/src/integration_tests.rs | 9 +- runtime/common/src/paras_registrar.rs | 44 ++++--- runtime/common/src/paras_sudo_wrapper.rs | 8 +- runtime/common/src/purchase.rs | 23 ++-- runtime/common/src/slots/mod.rs | 33 +++--- runtime/common/src/traits.rs | 25 ++-- runtime/kusama/src/xcm_config.rs | 28 ++--- runtime/parachains/src/builder.rs | 16 +-- runtime/parachains/src/configuration.rs | 48 ++++---- .../src/configuration/migration/v7.rs | 21 ++-- runtime/parachains/src/disputes.rs | 7 +- runtime/parachains/src/disputes/migration.rs | 6 +- runtime/parachains/src/disputes/tests.rs | 6 +- runtime/parachains/src/hrmp.rs | 19 +-- runtime/parachains/src/inclusion/mod.rs | 27 ++--- runtime/parachains/src/initializer.rs | 12 +- runtime/parachains/src/origin.rs | 1 - runtime/parachains/src/paras/mod.rs | 91 ++++++++------- runtime/parachains/src/paras/tests.rs | 8 +- runtime/parachains/src/paras_inherent/mod.rs | 42 ++++--- .../parachains/src/paras_inherent/tests.rs | 51 +++++--- runtime/parachains/src/runtime_api_impl/v5.rs | 3 +- runtime/parachains/src/scheduler.rs | 94 ++++++++------- runtime/parachains/src/scheduler/tests.rs | 14 ++- runtime/parachains/src/shared.rs | 4 +- runtime/parachains/src/util.rs | 4 +- 
runtime/polkadot/src/governance/old.rs | 3 +- runtime/polkadot/src/xcm_config.rs | 23 ++-- runtime/rococo/src/xcm_config.rs | 12 +- runtime/test-runtime/src/lib.rs | 4 +- runtime/test-runtime/src/xcm_config.rs | 4 +- runtime/westend/src/lib.rs | 4 +- runtime/westend/src/xcm_config.rs | 4 +- rustfmt.toml | 12 +- scripts/ci/gitlab/pipeline/test.yml | 1 + statement-table/src/generic.rs | 17 +-- tests/common.rs | 3 +- utils/staking-miner/src/opts.rs | 26 +++-- utils/staking-miner/src/rpc.rs | 3 +- xcm/pallet-xcm-benchmarks/src/generic/mod.rs | 9 +- xcm/pallet-xcm/src/lib.rs | 108 +++++++++-------- xcm/src/double_encoded.rs | 8 +- xcm/src/lib.rs | 12 +- xcm/src/v2/junction.rs | 16 +-- xcm/src/v2/mod.rs | 78 ++++++------- xcm/src/v2/multiasset.rs | 76 ++++++------ xcm/src/v2/multilocation.rs | 29 ++--- xcm/src/v2/traits.rs | 31 ++--- xcm/src/v3/junction.rs | 39 ++++--- xcm/src/v3/junctions.rs | 18 +-- xcm/src/v3/mod.rs | 69 ++++++----- xcm/src/v3/multiasset.rs | 57 +++++---- xcm/src/v3/multilocation.rs | 14 ++- xcm/src/v3/traits.rs | 9 +- xcm/xcm-builder/src/asset_conversion.rs | 6 +- xcm/xcm-builder/src/currency_adapter.rs | 4 +- xcm/xcm-builder/src/fungibles_adapter.rs | 4 +- xcm/xcm-builder/src/location_conversion.rs | 7 +- xcm/xcm-builder/src/origin_aliases.rs | 3 +- xcm/xcm-builder/src/origin_conversion.rs | 18 +-- xcm/xcm-builder/src/tests/assets.rs | 3 +- .../tests/bridging/paid_remote_relay_relay.rs | 6 +- xcm/xcm-builder/src/tests/mock.rs | 4 +- xcm/xcm-builder/src/tests/querying.rs | 3 +- xcm/xcm-builder/src/universal_exports.rs | 8 +- xcm/xcm-builder/src/weight.rs | 5 +- xcm/xcm-builder/tests/scenarios.rs | 4 +- xcm/xcm-executor/src/assets.rs | 47 ++++---- xcm/xcm-executor/src/lib.rs | 22 ++-- xcm/xcm-executor/src/traits/asset_exchange.rs | 4 +- xcm/xcm-executor/src/traits/asset_lock.rs | 4 +- xcm/xcm-executor/src/traits/conversion.rs | 6 +- .../src/traits/filter_asset_location.rs | 3 +- xcm/xcm-executor/src/traits/on_response.rs | 12 +- xcm/xcm-executor/src/traits/should_execute.rs | 4 +- xcm/xcm-executor/src/traits/transact_asset.rs | 53 +++++---- xcm/xcm-executor/src/traits/weight.rs | 8 +- xcm/xcm-simulator/src/lib.rs | 26 ++--- 203 files changed, 1880 insertions(+), 1504 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 66b28b3485d8..4796a2c26965 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -29,4 +29,5 @@ rustflags = [ "-Aclippy::needless_option_as_deref", # false positives "-Aclippy::derivable_impls", # false positives "-Aclippy::stable_sort_primitive", # prefer stable sort + "-Aclippy::extra-unused-type-parameters", # stylistic ] diff --git a/cli/src/cli.rs b/cli/src/cli.rs index e78213cf11c8..c13340d91a04 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -130,8 +130,8 @@ pub struct RunCmd { pub overseer_channel_capacity_override: Option, /// Path to the directory where auxiliary worker binaries reside. If not specified, the main - /// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: if - /// the path points to an executable rather then directory, that executable is used both as + /// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: + /// if the path points to an executable rather then directory, that executable is used both as /// preparation and execution worker. 
#[arg(long, value_name = "PATH")] pub workers_path: Option, diff --git a/cli/src/command.rs b/cli/src/command.rs index c8e8673c6d70..c75f96ee2ebf 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -148,8 +148,8 @@ impl SubstrateCli for Cli { let chain_spec = Box::new(service::PolkadotChainSpec::from_json_file(path.clone())?) as Box; - // When `force_*` is given or the file name starts with the name of one of the known chains, - // we use the chain spec for the specific chain. + // When `force_*` is given or the file name starts with the name of one of the known + // chains, we use the chain spec for the specific chain. if self.run.force_rococo || chain_spec.is_rococo() || chain_spec.is_wococo() || diff --git a/core-primitives/src/lib.rs b/core-primitives/src/lib.rs index 5e06966ecfee..aa01cf8dfc45 100644 --- a/core-primitives/src/lib.rs +++ b/core-primitives/src/lib.rs @@ -91,10 +91,10 @@ impl sp_std::fmt::Debug for CandidateHash { pub type Nonce = u32; /// The balance of an account. -/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a resolution -/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (`10^11` unit -/// denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years (`10^9` multiplier) -/// for an eventual total of `10^27` units (27 significant decimal figures). +/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a +/// resolution to all for one second's worth of an annualised 50% reward be paid to a unit holder +/// (`10^11` unit denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years +/// (`10^9` multiplier) for an eventual total of `10^27` units (27 significant decimal figures). /// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so /// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow. pub type Balance = u128; @@ -121,8 +121,8 @@ pub type Remark = [u8; 32]; /// The size of the message is limited by the `config.max_downward_message_size` parameter. pub type DownwardMessage = sp_std::vec::Vec; -/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number when -/// the message was sent. +/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number +/// when the message was sent. #[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)] pub struct InboundDownwardMessage { /// The block number at which these messages were put into the downward message queue. diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 02a0e8df8f61..8726ebf44c71 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -22,9 +22,11 @@ //! //! * If there is no collation generation config, ignore. //! * Otherwise, for each `activated` head in the update: -//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime API. +//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime +//! API. //! * Use the Runtime API subsystem to fetch the full validation data. -//! * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with the configuration's `key`. +//! * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with +//! the configuration's `key`. //! 
* Dispatch a [`CollatorProtocolMessage::DistributeCollation`]`(receipt, pov)`. #![deny(missing_docs)] @@ -77,8 +79,8 @@ impl CollationGenerationSubsystem { /// Conceptually, this is very simple: it just loops forever. /// /// - On incoming overseer messages, it starts or stops jobs as appropriate. - /// - On other incoming messages, if they can be converted into `Job::ToJob` and - /// include a hash, then they're forwarded to the appropriate individual job. + /// - On other incoming messages, if they can be converted into `Job::ToJob` and include a hash, + /// then they're forwarded to the appropriate individual job. /// - On outgoing messages from the jobs, it forwards them to the overseer. /// /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. @@ -109,9 +111,10 @@ impl CollationGenerationSubsystem { } // handle an incoming message. return true if we should break afterwards. - // note: this doesn't strictly need to be a separate function; it's more an administrative function - // so that we don't clutter the run loop. It could in principle be inlined directly into there. - // it should hopefully therefore be ok that it's an async function mutably borrowing self. + // note: this doesn't strictly need to be a separate function; it's more an administrative + // function so that we don't clutter the run loop. It could in principle be inlined directly + // into there. it should hopefully therefore be ok that it's an async function mutably borrowing + // self. async fn handle_incoming( &mut self, incoming: SubsystemResult::Message>>, @@ -319,8 +322,9 @@ async fn handle_new_activations( // As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures // that honest collators never produce a PoV which is uncompressed. // - // As such, honest collators never produce an uncompressed PoV which starts with - // a compression magic number, which would lead validators to reject the collation. + // As such, honest collators never produce an uncompressed PoV which starts + // with a compression magic number, which would lead validators to reject + // the collation. if encoded_size > validation_data.max_pov_size as usize { gum::debug!( target: LOG_TARGET, diff --git a/node/collation-generation/src/tests.rs b/node/collation-generation/src/tests.rs index b2534bcf36c1..1c98e1450941 100644 --- a/node/collation-generation/src/tests.rs +++ b/node/collation-generation/src/tests.rs @@ -203,9 +203,9 @@ mod handle_new_activations { .into_inner(); // the only activated hash should be from the 4 hash: - // each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5 - // given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4 - // hash. + // each activated hash generates two scheduled cores: one with its value * 4, one with its + // value * 5 given that the test configuration has a `para_id` of 16, there's only one way + // to get that value: with the 4 hash. assert_eq!(requested_validation_data, vec![[4; 32].into()]); } @@ -301,8 +301,8 @@ mod handle_new_activations { .into_inner(); // we expect a single message to be sent, containing a candidate receipt. 
- // we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the - // correct descriptor + // we don't care too much about the `commitments_hash` right now, but let's ensure that + // we've calculated the correct descriptor let expect_pov_hash = test_collation_compressed().proof_of_validity.into_compressed().hash(); let expect_validation_data_hash = test_validation_data().hash(); diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index bfecdba73f88..f345b57029b5 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -42,8 +42,8 @@ pub enum RequiredTranches { /// assignments that are before the local time. maximum_broadcast: DelayTranche, /// The clock drift, in ticks, to apply to the local clock when determining whether - /// to broadcast an assignment or when to schedule a wakeup. The local clock should be treated - /// as though it is `clock_drift` ticks earlier. + /// to broadcast an assignment or when to schedule a wakeup. The local clock should be + /// treated as though it is `clock_drift` ticks earlier. clock_drift: Tick, }, /// An exact number of required tranches and a number of no-shows. This indicates that @@ -55,8 +55,8 @@ pub enum RequiredTranches { /// The amount of missing votes that should be tolerated. tolerated_missing: usize, /// When the next no-show would be, if any. This is used to schedule the next wakeup in the - /// event that there are some assignments that don't have corresponding approval votes. If this - /// is `None`, all assignments have approvals. + /// event that there are some assignments that don't have corresponding approval votes. If + /// this is `None`, all assignments have approvals. next_no_show: Option, /// The last tick at which a needed assignment was received. last_assignment_tick: Option, diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 40a24e2dd937..0e1d18198c21 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -218,13 +218,14 @@ impl AssignmentCriteria for RealAssignmentCriteria { } /// Compute the assignments for a given block. Returns a map containing all assignments to cores in -/// the block. If more than one assignment targets the given core, only the earliest assignment is kept. +/// the block. If more than one assignment targets the given core, only the earliest assignment is +/// kept. /// -/// The `leaving_cores` parameter indicates all cores within the block where a candidate was included, -/// as well as the group index backing those. +/// The `leaving_cores` parameter indicates all cores within the block where a candidate was +/// included, as well as the group index backing those. /// -/// The current description of the protocol assigns every validator to check every core. But at different times. -/// The idea is that most assignments are never triggered and fall by the wayside. +/// The current description of the protocol assigns every validator to check every core. But at +/// different times. The idea is that most assignments are never triggered and fall by the wayside. /// /// This will not assign to anything the local validator was part of the backing group for. pub(crate) fn compute_assignments( @@ -463,8 +464,8 @@ pub(crate) enum InvalidAssignmentReason { /// * Sample is out of bounds /// * Validator is present in backing group. 
/// -/// This function does not check whether the core is actually a valid assignment or not. That should be done -/// outside the scope of this function. +/// This function does not check whether the core is actually a valid assignment or not. That should +/// be done outside the scope of this function. pub(crate) fn check_assignment_cert( claimed_core_index: CoreIndex, validator_index: ValidatorIndex, diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index e33caed49c5f..c504ba71b3c2 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -104,7 +104,8 @@ enum ImportedBlockInfoError { VrfInfoUnavailable, } -/// Computes information about the imported block. Returns an error if the info couldn't be extracted. +/// Computes information about the imported block. Returns an error if the info couldn't be +/// extracted. #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn imported_block_info( ctx: &mut Context, @@ -181,20 +182,21 @@ async fn imported_block_info( // It's not obvious whether to use the hash or the parent hash for this, intuitively. We // want to use the block hash itself, and here's why: // - // First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology from - // the paper, which we fulfill using 'session's, which are a Substrate consensus concept. + // First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology + // from the paper, which we fulfill using 'session's, which are a Substrate consensus + // concept. // - // In BABE, the on-chain and off-chain view of the current epoch can differ at epoch boundaries - // because epochs change precisely at a slot. When a block triggers a new epoch, the state of - // its parent will still have the old epoch. Conversely, we have the invariant that every - // block in BABE has the epoch _it was authored in_ within its post-state. So we use the - // block, and not its parent. + // In BABE, the on-chain and off-chain view of the current epoch can differ at epoch + // boundaries because epochs change precisely at a slot. When a block triggers a new epoch, + // the state of its parent will still have the old epoch. Conversely, we have the invariant + // that every block in BABE has the epoch _it was authored in_ within its post-state. So we + // use the block, and not its parent. // - // It's worth nothing that Polkadot session changes, at least for the purposes of parachains, - // would function the same way, except for the fact that they're always delayed by one block. - // This gives us the opposite invariant for sessions - the parent block's post-state gives - // us the canonical information about the session index for any of its children, regardless - // of which slot number they might be produced at. + // It's worth nothing that Polkadot session changes, at least for the purposes of + // parachains, would function the same way, except for the fact that they're always delayed + // by one block. This gives us the opposite invariant for sessions - the parent block's + // post-state gives us the canonical information about the session index for any of its + // children, regardless of which slot number they might be produced at. 
ctx.send_message(RuntimeApiMessage::Request( block_hash, RuntimeApiRequest::CurrentBabeEpoch(s_tx), diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 05b92f459529..7e29e64c400a 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -1232,8 +1232,8 @@ async fn handle_from_overseer( ); // Our first wakeup will just be the tranche of our assignment, - // if any. This will likely be superseded by incoming assignments - // and approvals which trigger rescheduling. + // if any. This will likely be superseded by incoming + // assignments and approvals which trigger rescheduling. actions.push(Action::ScheduleWakeup { block_hash: block_batch.block_hash, block_number: block_batch.block_number, @@ -1256,12 +1256,14 @@ async fn handle_from_overseer( crate::ops::canonicalize(db, block_number, block_hash) .map_err(|e| SubsystemError::with_origin("db", e))?; - // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly. + // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans + // accordingly. wakeups.prune_finalized_wakeups(block_number, &mut state.spans); - // // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly. - // let hash_set = wakeups.block_numbers.values().flatten().collect::>(); - // state.spans.retain(|hash, _| hash_set.contains(hash)); + // // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans + // accordingly. let hash_set = + // wakeups.block_numbers.values().flatten().collect::>(); state.spans. + // retain(|hash, _| hash_set.contains(hash)); Vec::new() }, @@ -1403,8 +1405,8 @@ async fn get_approval_signatures_for_candidate( tx_distribution, )); - // Because of the unbounded sending and the nature of the call (just fetching data from state), - // this should not block long: + // Because of the unbounded sending and the nature of the call (just fetching data from + // state), this should not block long: match rx_distribution.timeout(WAIT_FOR_SIGS_TIMEOUT).await { None => { gum::warn!( @@ -2117,9 +2119,10 @@ impl ApprovalStateTransition { } } -// Advance the approval state, either by importing an approval vote which is already checked to be valid and corresponding to an assigned -// validator on the candidate and block, or by noting that there are no further wakeups or tranches needed. This updates the block entry and candidate entry as -// necessary and schedules any further wakeups. +// Advance the approval state, either by importing an approval vote which is already checked to be +// valid and corresponding to an assigned validator on the candidate and block, or by noting that +// there are no further wakeups or tranches needed. This updates the block entry and candidate entry +// as necessary and schedules any further wakeups. async fn advance_approval_state( sender: &mut Sender, state: &State, @@ -2251,7 +2254,8 @@ where // 1. This is not a local approval, as we don't store anything new in the approval entry. // 2. The candidate is not newly approved, as we haven't altered the approval entry's // approved flag with `mark_approved` above. - // 3. The approver, if any, had already approved the candidate, as we haven't altered the bitfield. + // 3. The approver, if any, had already approved the candidate, as we haven't altered the + // bitfield. 
if transition.is_local_approval() || newly_approved || !already_approved_by.unwrap_or(true) { // In all other cases, we need to write the candidate entry. @@ -2279,7 +2283,8 @@ fn should_trigger_assignment( &approval_entry, RequiredTranches::All, ) - .is_approved(Tick::max_value()), // when all are required, we are just waiting for the first 1/3+ + // when all are required, we are just waiting for the first 1/3+ + .is_approved(Tick::max_value()), RequiredTranches::Pending { maximum_broadcast, clock_drift, .. } => { let drifted_tranche_now = tranche_now.saturating_sub(clock_drift as DelayTranche); @@ -2615,8 +2620,8 @@ async fn launch_approval( match val_rx.await { Err(_) => return ApprovalState::failed(validator_index, candidate_hash), Ok(Ok(ValidationResult::Valid(_, _))) => { - // Validation checked out. Issue an approval command. If the underlying service is unreachable, - // then there isn't anything we can do. + // Validation checked out. Issue an approval command. If the underlying service is + // unreachable, then there isn't anything we can do. gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid"); diff --git a/node/core/approval-voting/src/ops.rs b/node/core/approval-voting/src/ops.rs index 4d6dc5e7ad66..6f57b2f80e8a 100644 --- a/node/core/approval-voting/src/ops.rs +++ b/node/core/approval-voting/src/ops.rs @@ -161,7 +161,8 @@ pub fn canonicalize( } } - // Update all blocks-at-height keys, deleting all those which now have empty `block_assignments`. + // Update all blocks-at-height keys, deleting all those which now have empty + // `block_assignments`. for (h, at) in visited_heights.into_iter() { if at.is_empty() { overlay_db.delete_blocks_at_height(h); @@ -170,8 +171,8 @@ pub fn canonicalize( } } - // due to the fork pruning, this range actually might go too far above where our actual highest block is, - // if a relatively short fork is canonicalized. + // due to the fork pruning, this range actually might go too far above where our actual highest + // block is, if a relatively short fork is canonicalized. // TODO https://github.com/paritytech/polkadot/issues/3389 let new_range = StoredBlockRange(canon_number + 1, std::cmp::max(range.1, canon_number + 2)); diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 675d41b79c06..ef7dcecac075 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -67,8 +67,8 @@ const META_PREFIX: &[u8; 4] = b"meta"; const UNFINALIZED_PREFIX: &[u8; 11] = b"unfinalized"; const PRUNE_BY_TIME_PREFIX: &[u8; 13] = b"prune_by_time"; -// We have some keys we want to map to empty values because existence of the key is enough. We use this because -// rocksdb doesn't support empty values. +// We have some keys we want to map to empty values because existence of the key is enough. We use +// this because rocksdb doesn't support empty values. const TOMBSTONE_VALUE: &[u8] = b" "; /// Unavailable blocks are kept for 1 hour. @@ -139,10 +139,11 @@ enum State { /// Candidate data was first observed at the given time but is not available in any block. #[codec(index = 0)] Unavailable(BETimestamp), - /// The candidate was first observed at the given time and was included in the given list of unfinalized blocks, which may be - /// empty. The timestamp here is not used for pruning. Either one of these blocks will be finalized or the state will regress to - /// `State::Unavailable`, in which case the same timestamp will be reused. 
Blocks are sorted ascending first by block number and - /// then hash. + /// The candidate was first observed at the given time and was included in the given list of + /// unfinalized blocks, which may be empty. The timestamp here is not used for pruning. Either + /// one of these blocks will be finalized or the state will regress to `State::Unavailable`, in + /// which case the same timestamp will be reused. Blocks are sorted ascending first by block + /// number and then hash. #[codec(index = 1)] Unfinalized(BETimestamp, Vec<(BEBlockNumber, Hash)>), /// Candidate data has appeared in a finalized block and did so at the given time. @@ -820,8 +821,8 @@ fn note_block_included( match load_meta(db, config, &candidate_hash)? { None => { - // This is alarming. We've observed a block being included without ever seeing it backed. - // Warn and ignore. + // This is alarming. We've observed a block being included without ever seeing it + // backed. Warn and ignore. gum::warn!( target: LOG_TARGET, ?candidate_hash, @@ -894,9 +895,9 @@ async fn process_block_finalized( let mut db_transaction = DBTransaction::new(); let (start_prefix, end_prefix) = finalized_block_range(finalized_number); - // We have to do some juggling here of the `iter` to make sure it doesn't cross the `.await` boundary - // as it is not `Send`. That is why we create the iterator once within this loop, drop it, - // do an asynchronous request, and then instantiate the exact same iterator again. + // We have to do some juggling here of the `iter` to make sure it doesn't cross the `.await` + // boundary as it is not `Send`. That is why we create the iterator once within this loop, + // drop it, do an asynchronous request, and then instantiate the exact same iterator again. let batch_num = { let mut iter = subsystem .db @@ -961,8 +962,9 @@ async fn process_block_finalized( update_blocks_at_finalized_height(&subsystem, &mut db_transaction, batch, batch_num, now)?; - // We need to write at the end of the loop so the prefix iterator doesn't pick up the same values again - // in the next iteration. Another unfortunate effect of having to re-initialize the iterator. + // We need to write at the end of the loop so the prefix iterator doesn't pick up the same + // values again in the next iteration. Another unfortunate effect of having to re-initialize + // the iterator. subsystem.db.write(db_transaction)?; } @@ -1215,7 +1217,8 @@ fn process_message( // We do not bubble up internal errors to caller subsystems, instead the // tx channel is dropped and that error is caught by the caller subsystem. // - // We bubble up the specific error here so `av-store` logs still tell what happend. + // We bubble up the specific error here so `av-store` logs still tell what + // happend. return Err(e.into()) }, } @@ -1298,8 +1301,8 @@ fn store_available_data( .with_candidate(candidate_hash) .with_pov(&available_data.pov); - // Important note: This check below is critical for consensus and the `backing` subsystem relies on it to - // ensure candidate validity. + // Important note: This check below is critical for consensus and the `backing` subsystem relies + // on it to ensure candidate validity. 
let chunks = erasure::obtain_chunks_v1(n_validators, &available_data)?; let branches = erasure::branches(chunks.as_ref()); diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index dc0863cfa0b3..0abfbfad7657 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -422,7 +422,8 @@ struct CandidateBackingJob { awaiting_validation: HashSet, /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. fallbacks: HashMap)>, - /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` hash. + /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` + /// hash. seconded: Option, /// The candidates that are includable, by hash. Each entry here indicates /// that we've sent the provisioner the backed candidate. @@ -562,9 +563,10 @@ async fn store_available_data( expected_erasure_root: Hash, ) -> Result<(), Error> { let (tx, rx) = oneshot::channel(); - // Important: the `av-store` subsystem will check if the erasure root of the `available_data` matches `expected_erasure_root` - // which was provided by the collator in the `CandidateReceipt`. This check is consensus critical and the `backing` subsystem - // relies on it for ensuring candidate validity. + // Important: the `av-store` subsystem will check if the erasure root of the `available_data` + // matches `expected_erasure_root` which was provided by the collator in the `CandidateReceipt`. + // This check is consensus critical and the `backing` subsystem relies on it for ensuring + // candidate validity. sender .send_message(AvailabilityStoreMessage::StoreAvailableData { candidate_hash, @@ -582,8 +584,8 @@ async fn store_available_data( // Make a `PoV` available. // -// This calls the AV store to write the available data to storage. The AV store also checks the erasure root matches -// the `expected_erasure_root`. +// This calls the AV store to write the available data to storage. The AV store also checks the +// erasure root matches the `expected_erasure_root`. // This returns `Err()` on erasure root mismatch or due to any AV store subsystem error. // // Otherwise, it returns either `Ok(())` diff --git a/node/core/backing/src/metrics.rs b/node/core/backing/src/metrics.rs index 8468ea005404..77f0e7f9d92a 100644 --- a/node/core/backing/src/metrics.rs +++ b/node/core/backing/src/metrics.rs @@ -54,7 +54,8 @@ impl Metrics { self.0.as_ref().map(|metrics| metrics.process_statement.start_timer()) } - /// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes on drop. + /// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes + /// on drop. pub fn time_get_backed_candidates( &self, ) -> Option { diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 35c83297fa71..386cc9e2279e 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -84,7 +84,8 @@ impl Default for TestState { ]; let keystore = Arc::new(sc_keystore::LocalKeystore::in_memory()); - // Make sure `Alice` key is in the keystore, so this mocked node will be a parachain validator. + // Make sure `Alice` key is in the keystore, so this mocked node will be a parachain + // validator. 
Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, Some(&validators[0].to_seed())) .expect("Insert key into keystore"); diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 1e4d556de7ca..f29e827e1090 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -137,8 +137,8 @@ async fn get_availability_cores( /// - get the list of core states from the runtime /// - for each core, concurrently determine chunk availability (see `get_core_availability`) -/// - return the bitfield if there were no errors at any point in this process -/// (otherwise, it's prone to false negatives) +/// - return the bitfield if there were no errors at any point in this process (otherwise, it's +/// prone to false negatives) async fn construct_availability_bitfield( relay_parent: Hash, span: &jaeger::Span, diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 93a7e05c8724..f53f2a6aee06 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -67,15 +67,15 @@ mod tests; const LOG_TARGET: &'static str = "parachain::candidate-validation"; -/// The amount of time to wait before retrying after a retry-able backing validation error. We use a lower value for the -/// backing case, to fit within the lower backing timeout. +/// The amount of time to wait before retrying after a retry-able backing validation error. We use a +/// lower value for the backing case, to fit within the lower backing timeout. #[cfg(not(test))] const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500); #[cfg(test)] const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); -/// The amount of time to wait before retrying after a retry-able approval validation error. We use a higher value for -/// the approval case since we have more time, and if we wait longer it is more likely that transient conditions will -/// resolve. +/// The amount of time to wait before retrying after a retry-able approval validation error. We use +/// a higher value for the approval case since we have more time, and if we wait longer it is more +/// likely that transient conditions will resolve. #[cfg(not(test))] const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_secs(3); #[cfg(test)] @@ -451,9 +451,9 @@ where const ASSUMPTIONS: &[OccupiedCoreAssumption] = &[ OccupiedCoreAssumption::Included, OccupiedCoreAssumption::TimedOut, - // `TimedOut` and `Free` both don't perform any speculation and therefore should be the same - // for our purposes here. In other words, if `TimedOut` matched then the `Free` must be - // matched as well. + // `TimedOut` and `Free` both don't perform any speculation and therefore should be the + // same for our purposes here. In other words, if `TimedOut` matched then the `Free` must + // be matched as well. ]; // Consider running these checks in parallel to reduce validation latency. @@ -482,9 +482,10 @@ where AssumptionCheckOutcome::Matches(validation_data, validation_code) => Ok(Some((validation_data, validation_code))), AssumptionCheckOutcome::DoesNotMatch => { - // If neither the assumption of the occupied core having the para included or the assumption - // of the occupied core timing out are valid, then the persisted_validation_data_hash in the descriptor - // is not based on the relay parent and is thus invalid. 
+ // If neither the assumption of the occupied core having the para included or the + // assumption of the occupied core timing out are valid, then the + // persisted_validation_data_hash in the descriptor is not based on the relay parent and + // is thus invalid. Ok(None) }, AssumptionCheckOutcome::BadRequest => @@ -704,7 +705,8 @@ where "Invalid candidate (commitments hash)" ); - // If validation produced a new set of commitments, we treat the candidate as invalid. + // If validation produced a new set of commitments, we treat the candidate as + // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { Ok(ValidationResult::Valid(outputs, persisted_validation_data)) @@ -744,7 +746,8 @@ trait ValidationBackend { prep_timeout, PrepareJobKind::Compilation, ); - // We keep track of the total time that has passed and stop retrying if we are taking too long. + // We keep track of the total time that has passed and stop retrying if we are taking too + // long. let total_time_start = Instant::now(); let mut validation_result = @@ -780,8 +783,8 @@ trait ValidationBackend { _ => break, } - // If we got a possibly transient error, retry once after a brief delay, on the assumption - // that the conditions that caused this error may have resolved on their own. + // If we got a possibly transient error, retry once after a brief delay, on the + // assumption that the conditions that caused this error may have resolved on their own. { // Wait a brief delay before retrying. futures_timer::Delay::new(retry_delay).await; diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 4b512347dae4..aa5bb9548ad2 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -44,13 +44,15 @@ mod tree; mod tests; const LOG_TARGET: &str = "parachain::chain-selection"; -/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. +/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS +/// reboots. type Timestamp = u64; // If a block isn't approved in 120 seconds, nodes will abandon it // and begin building on another chain. const STAGNANT_TIMEOUT: Timestamp = 120; -// Delay prunning of the stagnant keys in prune only mode by 25 hours to avoid interception with the finality +// Delay prunning of the stagnant keys in prune only mode by 25 hours to avoid interception with the +// finality const STAGNANT_PRUNE_DELAY: Timestamp = 25 * 60 * 60; // Maximum number of stagnant entries cleaned during one `STAGNANT_TIMEOUT` iteration const MAX_STAGNANT_ENTRIES: usize = 1000; diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs index 2d14f5151003..f0f17d2325d6 100644 --- a/node/core/dispute-coordinator/src/db/v1.rs +++ b/node/core/dispute-coordinator/src/db/v1.rs @@ -52,8 +52,8 @@ const CLEANED_VOTES_WATERMARK_KEY: &[u8; 23] = b"cleaned-votes-watermark"; /// this should not be done at once, but rather in smaller batches so nodes won't get stalled by /// this. /// -/// 300 is with session duration of 1 hour and 30 parachains around <3_000_000 key purges in the worst -/// case. Which is already quite a lot, at the same time we have around 21_000 sessions on +/// 300 is with session duration of 1 hour and 30 parachains around <3_000_000 key purges in the +/// worst case. Which is already quite a lot, at the same time we have around 21_000 sessions on /// Kusama. 
This means at 300 purged sessions per session, cleaning everything up will take /// around 3 days. Depending on how severe disk usage becomes, we might want to bump the batch /// size, at the cost of risking issues at session boundaries (performance). @@ -346,7 +346,8 @@ pub(crate) fn note_earliest_session( if pruned_disputes.len() != 0 { overlay_db.write_recent_disputes(new_recent_disputes); - // Note: Deleting old candidate votes is handled in `write` based on the earliest session. + // Note: Deleting old candidate votes is handled in `write` based on the + // earliest session. } } }, diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index 912521834075..0da3723ebf22 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -19,12 +19,12 @@ //! This module encapsulates the actual logic for importing new votes and provides easy access of //! the current state for votes for a particular candidate. //! -//! In particular there is `CandidateVoteState` which tells what can be concluded for a particular set of -//! votes. E.g. whether a dispute is ongoing, whether it is confirmed, concluded, .. +//! In particular there is `CandidateVoteState` which tells what can be concluded for a particular +//! set of votes. E.g. whether a dispute is ongoing, whether it is confirmed, concluded, .. //! //! Then there is `ImportResult` which reveals information about what changed once additional votes -//! got imported on top of an existing `CandidateVoteState` and reveals "dynamic" information, like whether -//! due to the import a dispute was raised/got confirmed, ... +//! got imported on top of an existing `CandidateVoteState` and reveals "dynamic" information, like +//! whether due to the import a dispute was raised/got confirmed, ... use std::collections::{BTreeMap, HashMap, HashSet}; diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 2a1d8fd4b83c..c1d02ef976cb 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -92,8 +92,8 @@ pub struct InitialData { pub(crate) struct Initialized { keystore: Arc, runtime_info: RuntimeInfo, - /// This is the highest `SessionIndex` seen via `ActiveLeavesUpdate`. It doesn't matter if it was - /// cached successfully or not. It is used to detect ancient disputes. + /// This is the highest `SessionIndex` seen via `ActiveLeavesUpdate`. It doesn't matter if it + /// was cached successfully or not. It is used to detect ancient disputes. highest_session_seen: SessionIndex, /// Will be set to `true` if an error occured during the last caching attempt gaps_in_cache: bool, @@ -308,8 +308,8 @@ impl Initialized { Ok(session_idx) if self.gaps_in_cache || session_idx > self.highest_session_seen => { - // Fetch the last `DISPUTE_WINDOW` number of sessions unless there are no gaps in - // cache and we are not missing too many `SessionInfo`s + // Fetch the last `DISPUTE_WINDOW` number of sessions unless there are no gaps + // in cache and we are not missing too many `SessionInfo`s let mut lower_bound = session_idx.saturating_sub(DISPUTE_WINDOW.get() - 1); if !self.gaps_in_cache && self.highest_session_seen > lower_bound { lower_bound = self.highest_session_seen + 1 @@ -1133,8 +1133,8 @@ impl Initialized { } // Participate in dispute if we did not cast a vote before and actually have keys to cast a - // local vote. 
Disputes should fall in one of the categories below, otherwise we will refrain - // from participation: + // local vote. Disputes should fall in one of the categories below, otherwise we will + // refrain from participation: // - `is_included` lands in prioritised queue // - `is_confirmed` | `is_backed` lands in best effort queue // We don't participate in disputes on finalized candidates. diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 02bb6ef9ecda..a2c500e08e28 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -17,12 +17,13 @@ //! Implements the dispute coordinator subsystem. //! //! This is the central subsystem of the node-side components which participate in disputes. -//! This subsystem wraps a database which tracks all statements observed by all validators over some window of sessions. -//! Votes older than this session window are pruned. +//! This subsystem wraps a database which tracks all statements observed by all validators over some +//! window of sessions. Votes older than this session window are pruned. //! -//! This subsystem will be the point which produce dispute votes, either positive or negative, based on locally-observed -//! validation results as well as a sink for votes received by other subsystems. When importing a dispute vote from -//! another node, this will trigger dispute participation to recover and validate the block. +//! This subsystem will be the point which produce dispute votes, either positive or negative, based +//! on locally-observed validation results as well as a sink for votes received by other subsystems. +//! When importing a dispute vote from another node, this will trigger dispute participation to +//! recover and validate the block. use std::{num::NonZeroUsize, sync::Arc}; @@ -92,10 +93,10 @@ mod spam_slots; /// Handling of participation requests via `Participation`. /// -/// `Participation` provides an API (`Participation::queue_participation`) for queuing of dispute participations and will process those -/// participation requests, such that most important/urgent disputes will be resolved and processed -/// first and more importantly it will order requests in a way so disputes will get resolved, even -/// if there are lots of them. +/// `Participation` provides an API (`Participation::queue_participation`) for queuing of dispute +/// participations and will process those participation requests, such that most important/urgent +/// disputes will be resolved and processed first and more importantly it will order requests in a +/// way so disputes will get resolved, even if there are lots of them. pub(crate) mod participation; /// Pure processing of vote imports. diff --git a/node/core/dispute-coordinator/src/participation/queues/mod.rs b/node/core/dispute-coordinator/src/participation/queues/mod.rs index 4d8ee585ea29..8a4374999f88 100644 --- a/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -294,8 +294,8 @@ impl Queues { return Self::pop_impl(&mut self.priority) } - // `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. This function has - // the extracted implementation + // `pop_best_effort` and `pop_priority` do the same but on different `BTreeMap`s. 
This function + // has the extracted implementation fn pop_impl( target: &mut BTreeMap, ) -> Option<(CandidateComparator, ParticipationRequest)> { @@ -331,9 +331,10 @@ impl Queues { #[derive(Copy, Clone)] #[cfg_attr(test, derive(Debug))] struct CandidateComparator { - /// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases when - /// it can't be obtained. For example when the node is lagging behind and new leaves are received - /// with a slight delay. Candidates with unknown relay parent are treated with the lowest priority. + /// Block number of the relay parent. It's wrapped in an `Option<>` because there are cases + /// when it can't be obtained. For example when the node is lagging behind and new leaves are + /// received with a slight delay. Candidates with unknown relay parent are treated with the + /// lowest priority. /// /// The order enforced by `CandidateComparator` is important because we want to participate in /// the oldest disputes first. @@ -346,9 +347,10 @@ struct CandidateComparator { /// that is not stable. If a new fork appears after the fact, we would start ordering the same /// candidate differently, which would result in the same candidate getting queued twice. relay_parent_block_number: Option, - /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with the - /// same relay parent block number. Candidates without `relay_parent_block_number` are ordered by - /// the `candidate_hash` (and treated with the lowest priority, as already mentioned). + /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates with + /// the same relay parent block number. Candidates without `relay_parent_block_number` are + /// ordered by the `candidate_hash` (and treated with the lowest priority, as already + /// mentioned). candidate_hash: CandidateHash, } @@ -364,11 +366,11 @@ impl CandidateComparator { /// Create a candidate comparator for a given candidate. /// /// Returns: - /// - `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the + /// - `Ok(CandidateComparator{Some(relay_parent_block_number), candidate_hash})` when the /// relay parent can be obtained. This is the happy case. /// - `Ok(CandidateComparator{None, candidate_hash})` in case the candidate's relay parent /// can't be obtained. - /// - `FatalError` in case the chain API call fails with an unexpected error. + /// - `FatalError` in case the chain API call fails with an unexpected error. pub async fn new( sender: &mut impl overseer::DisputeCoordinatorSenderTrait, candidate: &CandidateReceipt, diff --git a/node/core/dispute-coordinator/src/participation/queues/tests.rs b/node/core/dispute-coordinator/src/participation/queues/tests.rs index 8293a935d11a..5e262d895e31 100644 --- a/node/core/dispute-coordinator/src/participation/queues/tests.rs +++ b/node/core/dispute-coordinator/src/participation/queues/tests.rs @@ -53,8 +53,8 @@ fn clone_request(request: &ParticipationRequest) -> ParticipationRequest { /// Check that dequeuing acknowledges order. /// /// Any priority item will be dequeued before any best effort items, priority and best effort with -/// known parent block number items will be processed in order. Best effort items without known parent -/// block number should be treated with lowest priority. +/// known parent block number items will be processed in order. Best effort items without known +/// parent block number should be treated with lowest priority. 
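[Editorial note] To make the ordering described above concrete, here is a minimal sketch of a comparator with the stated properties: known relay-parent block numbers order disputes oldest-first, an unknown relay parent sorts last (lowest priority), and the candidate hash breaks ties. The types are simplified stand-ins; this is not the actual `CandidateComparator` implementation.

    use std::cmp::Ordering;

    type BlockNumber = u32;
    type CandidateHash = [u8; 32];

    #[derive(PartialEq, Eq)]
    struct Comparator {
        relay_parent_block_number: Option<BlockNumber>,
        candidate_hash: CandidateHash,
    }

    impl Ord for Comparator {
        fn cmp(&self, other: &Self) -> Ordering {
            match (self.relay_parent_block_number, other.relay_parent_block_number) {
                // Known block numbers: participate in the oldest dispute first.
                (Some(a), Some(b)) =>
                    a.cmp(&b).then_with(|| self.candidate_hash.cmp(&other.candidate_hash)),
                // An unknown relay parent sorts after every known one,
                // i.e. it is dequeued with the lowest priority.
                (Some(_), None) => Ordering::Less,
                (None, Some(_)) => Ordering::Greater,
                (None, None) => self.candidate_hash.cmp(&other.candidate_hash),
            }
        }
    }

    impl PartialOrd for Comparator {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }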
#[test] fn ordering_works_as_expected() { let metrics = Metrics::default(); diff --git a/node/core/dispute-coordinator/src/participation/tests.rs b/node/core/dispute-coordinator/src/participation/tests.rs index ab58db4e7628..32725a3ac658 100644 --- a/node/core/dispute-coordinator/src/participation/tests.rs +++ b/node/core/dispute-coordinator/src/participation/tests.rs @@ -305,7 +305,8 @@ fn reqs_get_queued_on_no_recent_block() { // Responds to messages from the test and verifies its behaviour let request_handler = async { - // If we receive `BlockNumber` request this implicitly proves that the participation is queued + // If we receive `BlockNumber` request this implicitly proves that the participation is + // queued assert_matches!( ctx_handle.recv().await, AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => { diff --git a/node/core/dispute-coordinator/src/scraping/candidates.rs b/node/core/dispute-coordinator/src/scraping/candidates.rs index 89323907a732..38956700545c 100644 --- a/node/core/dispute-coordinator/src/scraping/candidates.rs +++ b/node/core/dispute-coordinator/src/scraping/candidates.rs @@ -98,7 +98,8 @@ mod ref_counted_candidates_tests { /// Keeps track of scraped candidates. Supports `insert`, `remove_up_to_height` and `contains` /// operations. pub struct ScrapedCandidates { - /// Main data structure which keeps the candidates we know about. `contains` does lookups only here. + /// Main data structure which keeps the candidates we know about. `contains` does lookups only + /// here. candidates: RefCountedCandidates, /// Keeps track at which block number a candidate was inserted. Used in `remove_up_to_height`. /// Without this tracking we won't be able to remove all candidates before block X. @@ -117,7 +118,8 @@ impl ScrapedCandidates { self.candidates.contains(candidate_hash) } - // Removes all candidates up to a given height. The candidates at the block height are NOT removed. + // Removes all candidates up to a given height. The candidates at the block height are NOT + // removed. pub fn remove_up_to_height(&mut self, height: &BlockNumber) -> HashSet { let mut candidates_modified: HashSet = HashSet::new(); let not_stale = self.candidates_by_block_number.split_off(&height); diff --git a/node/core/dispute-coordinator/src/scraping/mod.rs b/node/core/dispute-coordinator/src/scraping/mod.rs index a1e385b5ff85..f93ad0abab91 100644 --- a/node/core/dispute-coordinator/src/scraping/mod.rs +++ b/node/core/dispute-coordinator/src/scraping/mod.rs @@ -120,7 +120,8 @@ impl Inclusions { ) { for candidate in candidates_modified { if let Some(blocks_including) = self.inclusions_inner.get_mut(&candidate) { - // Returns everything after the given key, including the key. This works because the blocks are sorted in ascending order. + // Returns everything after the given key, including the key. This works because the + // blocks are sorted in ascending order. *blocks_including = blocks_including.split_off(height); } } @@ -150,8 +151,8 @@ impl Inclusions { /// /// Concretely: /// -/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been -/// included on chains. +/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been included on +/// chains. /// - Monitors for `CandidateBacked` events to keep track of all backed candidates. /// - Calls `FetchOnChainVotes` for each block to gather potentially missed votes from chain. /// @@ -294,11 +295,11 @@ impl ChainScraper { /// Prune finalized candidates. 
/// - /// We keep each candidate for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after finalization. - /// After that we treat it as low priority. + /// We keep each candidate for `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after + /// finalization. After that we treat it as low priority. pub fn process_finalized_block(&mut self, finalized_block_number: &BlockNumber) { - // `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because `finalized_block_number`counts to the - // candidate lifetime. + // `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because + // `finalized_block_number`counts to the candidate lifetime. match finalized_block_number.checked_sub(DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1) { Some(key_to_prune) => { diff --git a/node/core/dispute-coordinator/src/scraping/tests.rs b/node/core/dispute-coordinator/src/scraping/tests.rs index 57e0731056b7..d938304a9e97 100644 --- a/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/node/core/dispute-coordinator/src/scraping/tests.rs @@ -183,7 +183,8 @@ fn get_backed_candidate_event(block_number: BlockNumber) -> Vec GroupIndex::from(0), )] } -/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special cases. +/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special +/// cases. fn get_magic_candidate_hash() -> Hash { BlakeTwo256::hash(&"abc".encode()) } @@ -425,7 +426,7 @@ fn scraper_requests_candidates_of_non_finalized_ancestors() { &chain, finalized_block_number, BLOCKS_TO_SKIP - - (finalized_block_number - DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION) as usize, // Expect the provider not to go past finalized block. + (finalized_block_number - DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION) as usize, /* Expect the provider not to go past finalized block. */ get_backed_and_included_candidate_events, ); join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) @@ -468,7 +469,8 @@ fn scraper_prunes_finalized_candidates() { let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); - // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be removed + // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be + // removed finalized_block_number = TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; process_finalized_block(&mut scraper, &finalized_block_number); @@ -518,8 +520,9 @@ fn scraper_handles_backed_but_not_included_candidate() { finalized_block_number += 1; process_finalized_block(&mut scraper, &finalized_block_number); - // `FIRST_TEST_BLOCK` is finalized, which is within `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window. - // The candidate should still be backed. + // `FIRST_TEST_BLOCK` is finalized, which is within + // `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window. The candidate should still be + // backed. let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); assert!(!scraper.is_candidate_included(&candidate.hash())); assert!(scraper.is_candidate_backed(&candidate.hash())); @@ -576,7 +579,8 @@ fn scraper_handles_the_same_candidate_incuded_in_two_different_block_heights() { .await; // Finalize blocks to enforce pruning of scraped events. - // The magic candidate was added twice, so it shouldn't be removed if we finalize two more blocks. 
+ // The magic candidate was added twice, so it shouldn't be removed if we finalize two more + // blocks. finalized_block_number = test_targets.first().expect("there are two block nums") + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; process_finalized_block(&mut scraper, &finalized_block_number); @@ -641,7 +645,8 @@ fn inclusions_per_candidate_properly_adds_and_prunes() { ]) ); - // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the earlier inclusion should be removed + // After `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the earlier inclusion should + // be removed finalized_block_number = TEST_TARGET_BLOCK_NUMBER + DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION; process_finalized_block(&mut scraper, &finalized_block_number); diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index f2590aea1511..d0cf494d2d4d 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -734,8 +734,9 @@ fn too_many_unconfirmed_statements_are_considered_spam() { .await; // Participation has to fail here, otherwise the dispute will be confirmed. However - // participation won't happen at all because the dispute is neither backed, not confirmed - // nor the candidate is included. Or in other words - we'll refrain from participation. + // participation won't happen at all because the dispute is neither backed, not + // confirmed nor the candidate is included. Or in other words - we'll refrain from + // participation. { let (tx, rx) = oneshot::channel(); @@ -2050,7 +2051,8 @@ fn concluded_supermajority_against_non_active_after_time() { ImportStatementsResult::ValidImport => {} ); - // Use a different expected commitments hash to ensure the candidate validation returns invalid. + // Use a different expected commitments hash to ensure the candidate validation returns + // invalid. participation_with_distribution( &mut virtual_overseer, &candidate_hash, @@ -2351,7 +2353,8 @@ fn resume_dispute_with_local_statement() { assert_eq!(messages.len(), 1, "A message should have gone out."); - // Assert that subsystem is not sending Participation messages because we issued a local statement + // Assert that subsystem is not sending Participation messages because we issued a local + // statement assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; @@ -2445,7 +2448,8 @@ fn resume_dispute_without_local_statement_or_local_key() { Box::pin(async move { test_state.handle_resume_sync(&mut virtual_overseer, session).await; - // Assert that subsystem is not sending Participation messages because we issued a local statement + // Assert that subsystem is not sending Participation messages because we issued a + // local statement assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; @@ -2751,7 +2755,8 @@ fn redundant_votes_ignored() { } #[test] -/// Make sure no disputes are recorded when there are no opposing votes, even if we reached supermajority. +/// Make sure no disputes are recorded when there are no opposing votes, even if we reached +/// supermajority. 
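[Editorial note] On the "supermajority" wording used in these tests: the threshold is the usual BFT bound of strictly more than two thirds of the validator set. The helper below is only illustrative; the formula is the conventional one and is an assumption of this sketch, not taken from the diff.

    // Largest number of byzantine validators tolerated among `n` (assumed formula).
    fn byzantine_threshold(n: usize) -> usize {
        n.saturating_sub(1) / 3
    }

    // Minimum number of matching votes needed for a dispute-style supermajority.
    fn supermajority_threshold(n: usize) -> usize {
        n - byzantine_threshold(n)
    }

    fn main() {
        // With 6 validators, 5 matching votes form a supermajority; 4 do not.
        assert_eq!(supermajority_threshold(6), 5);
        // With 10 validators, 7 matching votes are enough.
        assert_eq!(supermajority_threshold(10), 7);
    }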
fn no_onesided_disputes() { test_harness(|mut test_state, mut virtual_overseer| { Box::pin(async move { @@ -3124,16 +3129,17 @@ fn participation_requests_reprioritized_for_newly_included() { candidate_receipt.descriptor.pov_hash = Hash::from( [repetition; 32], // Altering this receipt so its hash will be changed ); - // Set consecutive parents (starting from zero). They will order the candidates for participation. + // Set consecutive parents (starting from zero). They will order the candidates for + // participation. let parent_block_num: BlockNumber = repetition as BlockNumber - 1; candidate_receipt.descriptor.relay_parent = test_state.block_num_to_header.get(&parent_block_num).unwrap().clone(); receipts.push(candidate_receipt.clone()); } - // Mark all candidates as backed, so their participation requests make it to best effort. - // These calls must all occur before including the candidates due to test overseer - // oddities. + // Mark all candidates as backed, so their participation requests make it to best + // effort. These calls must all occur before including the candidates due to test + // overseer oddities. let mut candidate_events = Vec::new(); for r in receipts.iter() { candidate_events.push(make_candidate_backed_event(r.clone())) @@ -3172,7 +3178,8 @@ fn participation_requests_reprioritized_for_newly_included() { .await; // Handle corresponding messages to unblock import - // we need to handle `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` for import + // we need to handle `ApprovalVotingMessage::GetApprovalSignaturesForCandidate` for + // import handle_approval_vote_request( &mut virtual_overseer, &candidate_hash, @@ -3180,8 +3187,9 @@ fn participation_requests_reprioritized_for_newly_included() { ) .await; - // We'll trigger participation for the first `MAX_PARALLEL_PARTICIPATIONS` candidates. - // The rest will be queued => we need to handle `ChainApiMessage::BlockNumber` for them. + // We'll trigger participation for the first `MAX_PARALLEL_PARTICIPATIONS` + // candidates. The rest will be queued => we need to handle + // `ChainApiMessage::BlockNumber` for them. if idx >= crate::participation::MAX_PARALLEL_PARTICIPATIONS { // We send the `idx` as parent block number, because it is used for ordering. // This way we get predictable ordering and participation. @@ -3201,11 +3209,13 @@ fn participation_requests_reprioritized_for_newly_included() { ) .await; - // NB: The checks below are a bit racy. In theory candidate 2 can be processed even before candidate 0 and this is okay. If any - // of the asserts in the two functions after this comment fail -> rework `participation_with_distribution` to expect a set of + // NB: The checks below are a bit racy. In theory candidate 2 can be processed even + // before candidate 0 and this is okay. If any of the asserts in the two functions after + // this comment fail -> rework `participation_with_distribution` to expect a set of // commitment hashes instead of just one. 
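[Editorial note] The `MAX_PARALLEL_PARTICIPATIONS` behaviour these test comments rely on can be pictured with a small bounded-parallelism sketch: at most N participations run concurrently and the remainder wait in a queue until a slot frees up. The names and the constant value here are illustrative stand-ins, not the participation subsystem's actual code.

    use std::collections::VecDeque;

    const MAX_PARALLEL_PARTICIPATIONS: usize = 3;

    struct Dispatcher<T> {
        running: usize,
        queue: VecDeque<T>,
    }

    impl<T> Dispatcher<T> {
        fn new() -> Self {
            Self { running: 0, queue: VecDeque::new() }
        }

        // Either start a request immediately (returned to the caller to spawn)
        // or park it until a slot frees up.
        fn submit(&mut self, request: T) -> Option<T> {
            if self.running < MAX_PARALLEL_PARTICIPATIONS {
                self.running += 1;
                Some(request)
            } else {
                self.queue.push_back(request);
                None
            }
        }

        // Called when a participation finishes; hands back the next queued request, if any.
        fn on_finished(&mut self) -> Option<T> {
            self.running = self.running.saturating_sub(1);
            let next = self.queue.pop_front();
            if next.is_some() {
                self.running += 1;
            }
            next
        }
    }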
- // This is the candidate for which participation was started initially (`MAX_PARALLEL_PARTICIPATIONS` threshold was not yet hit) + // This is the candidate for which participation was started initially + // (`MAX_PARALLEL_PARTICIPATIONS` threshold was not yet hit) participation_with_distribution( &mut virtual_overseer, &receipts.get(0).expect("There is more than one candidate").hash(), @@ -3326,7 +3336,8 @@ fn informs_chain_selection_when_dispute_concluded_against() { ImportStatementsResult::ValidImport => {} ); - // Use a different expected commitments hash to ensure the candidate validation returns invalid. + // Use a different expected commitments hash to ensure the candidate validation returns + // invalid. participation_with_distribution( &mut virtual_overseer, &candidate_hash, @@ -3440,7 +3451,8 @@ fn session_info_is_requested_only_once() { test_state.handle_resume_sync(&mut virtual_overseer, session).await; - // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + // This leaf activation shouldn't fetch `SessionInfo` because the session is already + // cached test_state .activate_leaf_at_session( &mut virtual_overseer, @@ -3475,8 +3487,8 @@ fn session_info_is_requested_only_once() { }); } -// Big jump means the new session we see with a leaf update is at least a `DISPUTE_WINDOW` bigger than -// the already known one. In this case The whole `DISPUTE_WINDOW` should be fetched. +// Big jump means the new session we see with a leaf update is at least a `DISPUTE_WINDOW` bigger +// than the already known one. In this case The whole `DISPUTE_WINDOW` should be fetched. #[test] fn session_info_big_jump_works() { test_harness(|mut test_state, mut virtual_overseer| { @@ -3485,7 +3497,8 @@ fn session_info_big_jump_works() { test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await; - // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + // This leaf activation shouldn't fetch `SessionInfo` because the session is already + // cached test_state .activate_leaf_at_session( &mut virtual_overseer, @@ -3525,8 +3538,8 @@ fn session_info_big_jump_works() { }); } -// Small jump means the new session we see with a leaf update is at less than last known one + `DISPUTE_WINDOW`. In this -// case fetching should start from last known one + 1. +// Small jump means the new session we see with a leaf update is at less than last known one + +// `DISPUTE_WINDOW`. In this case fetching should start from last known one + 1. #[test] fn session_info_small_jump_works() { test_harness(|mut test_state, mut virtual_overseer| { @@ -3535,7 +3548,8 @@ fn session_info_small_jump_works() { test_state.handle_resume_sync(&mut virtual_overseer, session_on_startup).await; - // This leaf activation shouldn't fetch `SessionInfo` because the session is already cached + // This leaf activation shouldn't fetch `SessionInfo` because the session is already + // cached test_state .activate_leaf_at_session( &mut virtual_overseer, diff --git a/node/core/parachains-inherent/src/lib.rs b/node/core/parachains-inherent/src/lib.rs index f27481ee5a7d..3063147fb136 100644 --- a/node/core/parachains-inherent/src/lib.rs +++ b/node/core/parachains-inherent/src/lib.rs @@ -16,11 +16,12 @@ //! The parachain inherent data provider //! -//! Parachain backing and approval is an off-chain process, but the parachain needs to progress on chain as well. To -//! 
make it progress on chain a block producer needs to forward information about the state of a parachain to the -//! runtime. This information is forwarded through an inherent to the runtime. Here we provide the -//! [`ParachainInherentDataProvider`] that requests the relevant data from the provisioner subsystem and creates the -//! the inherent data that the runtime will use to create an inherent. +//! Parachain backing and approval is an off-chain process, but the parachain needs to progress on +//! chain as well. To make it progress on chain a block producer needs to forward information about +//! the state of a parachain to the runtime. This information is forwarded through an inherent to +//! the runtime. Here we provide the [`ParachainInherentDataProvider`] that requests the relevant +//! data from the provisioner subsystem and creates the the inherent data that the runtime will use +//! to create an inherent. #![deny(unused_crate_dependencies, unused_results)] diff --git a/node/core/provisioner/src/disputes/mod.rs b/node/core/provisioner/src/disputes/mod.rs index fab70a054698..2d8f6fb6e93b 100644 --- a/node/core/provisioner/src/disputes/mod.rs +++ b/node/core/provisioner/src/disputes/mod.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The disputes module is responsible for selecting dispute votes to be sent with the inherent data. +//! The disputes module is responsible for selecting dispute votes to be sent with the inherent +//! data. use crate::LOG_TARGET; use futures::channel::oneshot; @@ -22,7 +23,8 @@ use polkadot_node_primitives::CandidateVotes; use polkadot_node_subsystem::{messages::DisputeCoordinatorMessage, overseer}; use polkadot_primitives::{CandidateHash, SessionIndex}; -/// Request the relevant dispute statements for a set of disputes identified by `CandidateHash` and the `SessionIndex`. +/// Request the relevant dispute statements for a set of disputes identified by `CandidateHash` and +/// the `SessionIndex`. async fn request_votes( sender: &mut impl overseer::ProvisionerSenderTrait, disputes_to_query: Vec<(SessionIndex, CandidateHash)>, diff --git a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index 5c8aaad422f2..096b73d271a8 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -48,7 +48,8 @@ pub const MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME: usize = 200; /// Controls how much dispute votes to be fetched from the `dispute-coordinator` per iteration in /// `fn vote_selection`. The purpose is to fetch the votes in batches until /// `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME` is reached. If all votes are fetched in single call -/// we might fetch votes which we never use. This will create unnecessary load on `dispute-coordinator`. +/// we might fetch votes which we never use. This will create unnecessary load on +/// `dispute-coordinator`. /// /// This value should be less than `MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME`. Increase it in case /// `provisioner` sends too many `QueryCandidateVotes` messages to `dispite-coordinator`. @@ -68,22 +69,23 @@ const VOTES_SELECTION_BATCH_SIZE: usize = 11; /// * Offchain vs Onchain /// * Concluded onchain vs Unconcluded onchain /// -/// Provisioner fetches all disputes from `dispute-coordinator` and separates them in multiple partitions. 
-/// Please refer to `struct PartitionedDisputes` for details about the actual partitions. -/// Each partition has got a priority implicitly assigned to it and the disputes are selected based on this -/// priority (e.g. disputes in partition 1, then if there is space - disputes from partition 2 and so on). +/// Provisioner fetches all disputes from `dispute-coordinator` and separates them in multiple +/// partitions. Please refer to `struct PartitionedDisputes` for details about the actual +/// partitions. Each partition has got a priority implicitly assigned to it and the disputes are +/// selected based on this priority (e.g. disputes in partition 1, then if there is space - disputes +/// from partition 2 and so on). /// /// # Votes selection /// -/// Besides the prioritization described above the votes in each partition are filtered too. Provisioner -/// fetches all onchain votes and filters them out from all partitions. As a result the Runtime receives -/// only fresh votes (votes it didn't know about). +/// Besides the prioritization described above the votes in each partition are filtered too. +/// Provisioner fetches all onchain votes and filters them out from all partitions. As a result the +/// Runtime receives only fresh votes (votes it didn't know about). /// /// # How the onchain votes are fetched /// -/// The logic outlined above relies on `RuntimeApiRequest::Disputes` message from the Runtime. The user -/// check the Runtime version before calling `select_disputes`. If the function is used with old runtime -/// an error is logged and the logic will continue with empty onchain votes `HashMap`. +/// The logic outlined above relies on `RuntimeApiRequest::Disputes` message from the Runtime. The +/// user check the Runtime version before calling `select_disputes`. If the function is used with +/// old runtime an error is logged and the logic will continue with empty onchain votes `HashMap`. pub async fn select_disputes( sender: &mut Sender, metrics: &metrics::Metrics, @@ -110,7 +112,8 @@ where r }, Err(GetOnchainDisputesError::NotSupported(runtime_api_err, relay_parent)) => { - // Runtime version is checked before calling this method, so the error below should never happen! + // Runtime version is checked before calling this method, so the error below should + // never happen! gum::error!( target: LOG_TARGET, ?runtime_api_err, @@ -152,7 +155,8 @@ where gum::trace!(target: LOG_TARGET, ?leaf, "Filtering recent disputes"); // Filter out unconfirmed disputes. However if the dispute is already onchain - don't skip it. - // In this case we'd better push as much fresh votes as possible to bring it to conclusion faster. + // In this case we'd better push as much fresh votes as possible to bring it to conclusion + // faster. let recent_disputes = recent_disputes .into_iter() .filter(|d| d.2.is_confirmed_concluded() || onchain.contains_key(&(d.0, d.1))) @@ -178,9 +182,9 @@ where make_multi_dispute_statement_set(metrics, result) } -/// Selects dispute votes from `PartitionedDisputes` which should be sent to the runtime. Votes which -/// are already onchain are filtered out. Result should be sorted by `(SessionIndex, CandidateHash)` -/// which is enforced by the `BTreeMap`. This is a requirement from the runtime. +/// Selects dispute votes from `PartitionedDisputes` which should be sent to the runtime. Votes +/// which are already onchain are filtered out. Result should be sorted by `(SessionIndex, +/// CandidateHash)` which is enforced by the `BTreeMap`. 
This is a requirement from the runtime. async fn vote_selection( sender: &mut Sender, partitioned: PartitionedDisputes, @@ -237,9 +241,9 @@ where for (session_index, candidate_hash, selected_votes) in votes { let votes_len = selected_votes.valid.raw().len() + selected_votes.invalid.len(); if votes_len + total_votes_len > MAX_DISPUTE_VOTES_FORWARDED_TO_RUNTIME { - // we are done - no more votes can be added. Importantly, we don't add any votes for a dispute here - // if we can't fit them all. This gives us an important invariant, that backing votes for - // disputes make it into the provisioned vote set. + // we are done - no more votes can be added. Importantly, we don't add any votes for + // a dispute here if we can't fit them all. This gives us an important invariant, + // that backing votes for disputes make it into the provisioned vote set. gum::trace!( target: LOG_TARGET, ?request_votes_counter, @@ -483,7 +487,8 @@ fn make_multi_dispute_statement_set( .collect() } -/// Gets the on-chain disputes at a given block number and returns them as a `HashMap` so that searching in them is cheap. +/// Gets the on-chain disputes at a given block number and returns them as a `HashMap` so that +/// searching in them is cheap. pub async fn get_onchain_disputes( sender: &mut Sender, relay_parent: Hash, diff --git a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs index 4ae67e3b7968..7798ebe51aaf 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs @@ -237,21 +237,22 @@ fn partitioning_happy_case() { ); } -// This test verifies the double voting behavior. Currently we don't care if a supermajority is achieved with or -// without the 'help' of a double vote (a validator voting for and against at the same time). This makes the test -// a bit pointless but anyway I'm leaving it here to make this decision explicit and have the test code ready in -// case this behavior needs to be further tested in the future. -// Link to the PR with the discussions: https://github.com/paritytech/polkadot/pull/5567 +// This test verifies the double voting behavior. Currently we don't care if a supermajority is +// achieved with or without the 'help' of a double vote (a validator voting for and against at the +// same time). This makes the test a bit pointless but anyway I'm leaving it here to make this +// decision explicit and have the test code ready in case this behavior needs to be further tested +// in the future. Link to the PR with the discussions: https://github.com/paritytech/polkadot/pull/5567 #[test] fn partitioning_doubled_onchain_vote() { let mut input = Vec::<(SessionIndex, CandidateHash, DisputeStatus)>::new(); let mut onchain = HashMap::<(u32, CandidateHash), DisputeState>::new(); - // Dispute A relies on a 'double onchain vote' to conclude. Validator with index 0 has voted both `for` and `against`. - // Despite that this dispute should be considered 'can conclude onchain'. + // Dispute A relies on a 'double onchain vote' to conclude. Validator with index 0 has voted + // both `for` and `against`. Despite that this dispute should be considered 'can conclude + // onchain'. let dispute_a = (3, CandidateHash(Hash::random()), DisputeStatus::Active); - // Dispute B has supermajority + 1 votes, so the doubled onchain vote doesn't affect it. It should be considered - // as 'can conclude onchain'. 
+ // Dispute B has supermajority + 1 votes, so the doubled onchain vote doesn't affect it. It + // should be considered as 'can conclude onchain'. let dispute_b = (4, CandidateHash(Hash::random()), DisputeStatus::Active); input.push(dispute_a.clone()); input.push(dispute_b.clone()); diff --git a/node/core/provisioner/src/error.rs b/node/core/provisioner/src/error.rs index 0f1747995843..5645ed2762bc 100644 --- a/node/core/provisioner/src/error.rs +++ b/node/core/provisioner/src/error.rs @@ -81,7 +81,8 @@ pub enum Error { OverseerExited(SubsystemError), } -/// Used by `get_onchain_disputes` to represent errors related to fetching on-chain disputes from the Runtime +/// Used by `get_onchain_disputes` to represent errors related to fetching on-chain disputes from +/// the Runtime #[allow(dead_code)] // Remove when promoting to stable #[fatality::fatality] pub enum GetOnchainDisputesError { diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 3ae297fee736..b5073763dfab 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -466,11 +466,11 @@ async fn send_inherent_data( /// - not more than one per validator /// - each 1 bit must correspond to an occupied core /// -/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability, -/// we pick the one with the greatest number of 1 bits. +/// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing +/// availability, we pick the one with the greatest number of 1 bits. /// -/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated -/// to the sorting of the input. +/// Note: This does not enforce any sorting precondition on the output; the ordering there will be +/// unrelated to the sorting of the input. fn select_availability_bitfields( cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -532,7 +532,8 @@ fn select_availability_bitfields( selected.into_values().collect() } -/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. +/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to +/// each free core. async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], @@ -593,7 +594,8 @@ async fn select_candidates( let computed_validation_data_hash = validation_data.hash(); - // we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria + // we arbitrarily pick the first of the backed candidates which match the appropriate + // selection criteria if let Some(candidate) = candidates.iter().find(|backed_candidate| { let descriptor = &backed_candidate.descriptor; descriptor.para_id == scheduled_core.para_id && @@ -628,12 +630,12 @@ async fn select_candidates( gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); - // `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates` - // _should_ preserve that property, but let's just make sure. + // `selected_candidates` is generated in ascending order by core index, and + // `GetBackedCandidates` _should_ preserve that property, but let's just make sure. // - // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate - // maps to either 0 or 1 backed candidate, and the hashes correspond. 
Therefore, by checking them - // in order, we can ensure that the backed candidates are also in order. + // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected + // candidate maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by + // checking them in order, we can ensure that the backed candidates are also in order. let mut backed_idx = 0; for selected in selected_candidates { if selected == @@ -705,8 +707,9 @@ fn bitfields_indicate_availability( let validator_idx = bitfield.validator_index().0 as usize; match availability.get_mut(validator_idx) { None => { - // in principle, this function might return a `Result` so that we can more clearly express this error condition - // however, in practice, that would just push off an error-handling routine which would look a whole lot like this one. + // in principle, this function might return a `Result` so that we can + // more clearly express this error condition however, in practice, that would just + // push off an error-handling routine which would look a whole lot like this one. // simpler to just handle the error internally here. gum::warn!( target: LOG_TARGET, @@ -726,8 +729,8 @@ fn bitfields_indicate_availability( 3 * availability.count_ones() >= 2 * availability.len() } -// If we have to be absolutely precise here, this method gets the version of the `ParachainHost` api. -// For brevity we'll just call it 'runtime version'. +// If we have to be absolutely precise here, this method gets the version of the `ParachainHost` +// api. For brevity we'll just call it 'runtime version'. async fn has_required_runtime( sender: &mut impl overseer::ProvisionerSenderTrait, relay_parent: Hash, diff --git a/node/core/provisioner/src/metrics.rs b/node/core/provisioner/src/metrics.rs index c65d999d04a7..fabbd798cf02 100644 --- a/node/core/provisioner/src/metrics.rs +++ b/node/core/provisioner/src/metrics.rs @@ -28,9 +28,10 @@ struct MetricsInner { /// Bitfields array length in `ProvisionerInherentData` (the result for `RequestInherentData`) inherent_data_response_bitfields: prometheus::Histogram, - /// The following metrics track how many disputes/votes the runtime will have to process. These will count - /// all recent statements meaning every dispute from last sessions: 10 min on Rococo, 60 min on Kusama and - /// 4 hours on Polkadot. The metrics are updated only when the node authors a block, so values vary across nodes. + /// The following metrics track how many disputes/votes the runtime will have to process. These + /// will count all recent statements meaning every dispute from last sessions: 10 min on + /// Rococo, 60 min on Kusama and 4 hours on Polkadot. The metrics are updated only when the + /// node authors a block, so values vary across nodes. 
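[Editorial note] Taken together, the two rules spelled out above (keep at most one bitfield per validator, preferring the one with the most bits set, and require strictly more than two thirds of validators to hold a chunk) can be sketched as follows with simplified types. The real code additionally checks the bitfield length against the number of cores and that set bits correspond to occupied cores, which this sketch omits.

    use std::collections::HashMap;

    type ValidatorIndex = u32;
    type Bitfield = Vec<bool>;

    fn count_ones(bits: &Bitfield) -> usize {
        bits.iter().filter(|b| **b).count()
    }

    // At most one bitfield per validator; keep the one with the greatest number of 1 bits.
    fn select_bitfields(bitfields: &[(ValidatorIndex, Bitfield)]) -> Vec<Bitfield> {
        let mut selected: HashMap<ValidatorIndex, Bitfield> = HashMap::new();
        for (validator, bits) in bitfields {
            match selected.get(validator) {
                Some(existing) if count_ones(existing) >= count_ones(bits) => {},
                _ => {
                    let _ = selected.insert(*validator, bits.clone());
                },
            }
        }
        selected.into_values().collect()
    }

    // The 2/3 availability rule quoted above: `3 * ones >= 2 * total`.
    fn bitfields_indicate_availability(per_validator_availability: &Bitfield) -> bool {
        3 * count_ones(per_validator_availability) >= 2 * per_validator_availability.len()
    }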
inherent_data_dispute_statement_sets: prometheus::Counter, inherent_data_dispute_statements: prometheus::CounterVec, diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index e8692df8543a..4a469a43c893 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -90,7 +90,8 @@ mod select_availability_bitfields { let cores = vec![occupied_core(0), occupied_core(1)]; // we pass in three bitfields with two validators - // this helps us check the postcondition that we get two bitfields back, for which the validators differ + // this helps us check the postcondition that we get two bitfields back, for which the + // validators differ let bitfields = vec![ signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(0)), signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(1)), diff --git a/node/core/pvf-checker/src/lib.rs b/node/core/pvf-checker/src/lib.rs index 222e85e36542..2946f3f78861 100644 --- a/node/core/pvf-checker/src/lib.rs +++ b/node/core/pvf-checker/src/lib.rs @@ -110,8 +110,8 @@ struct State { /// /// Here are some fun facts about these futures: /// - /// - Pre-checking can take quite some time, in the matter of tens of seconds, so the futures here - /// can soak for quite some time. + /// - Pre-checking can take quite some time, in the matter of tens of seconds, so the futures + /// here can soak for quite some time. /// - Pre-checking of one PVF can take drastically more time than pre-checking of another PVF. /// This leads to results coming out of order. /// diff --git a/node/core/pvf-checker/src/tests.rs b/node/core/pvf-checker/src/tests.rs index 46e760936144..b223b1b54c0b 100644 --- a/node/core/pvf-checker/src/tests.rs +++ b/node/core/pvf-checker/src/tests.rs @@ -110,8 +110,8 @@ impl TestState { Self { leaves, sessions, last_session_index } } - /// A convenience function to receive a message from the overseer and returning `None` if nothing - /// was received within a reasonable (for local tests anyway) timeout. + /// A convenience function to receive a message from the overseer and returning `None` if + /// nothing was received within a reasonable (for local tests anyway) timeout. async fn recv_timeout(&mut self, handle: &mut VirtualOverseer) -> Option { futures::select! { msg = handle.recv().fuse() => { diff --git a/node/core/pvf/common/src/error.rs b/node/core/pvf/common/src/error.rs index 64d17800ac10..6eb0d9b7df42 100644 --- a/node/core/pvf/common/src/error.rs +++ b/node/core/pvf/common/src/error.rs @@ -18,8 +18,8 @@ use crate::prepare::PrepareStats; use parity_scale_codec::{Decode, Encode}; use std::fmt; -/// Result of PVF preparation performed by the validation host. Contains stats about the preparation if -/// successful +/// Result of PVF preparation performed by the validation host. Contains stats about the preparation +/// if successful pub type PrepareResult = Result; /// An error that occurred during the prepare part of the PVF pipeline. @@ -35,13 +35,15 @@ pub enum PrepareError { Panic(String), /// Failed to prepare the PVF due to the time limit. TimedOut, - /// An IO error occurred. This state is reported by either the validation host or by the worker. + /// An IO error occurred. This state is reported by either the validation host or by the + /// worker. IoErr(String), - /// The temporary file for the artifact could not be created at the given cache path. This state is reported by the - /// validation host (not by the worker). 
+ /// The temporary file for the artifact could not be created at the given cache path. This + /// state is reported by the validation host (not by the worker). CreateTmpFileErr(String), - /// The response from the worker is received, but the file cannot be renamed (moved) to the final destination - /// location. This state is reported by the validation host (not by the worker). + /// The response from the worker is received, but the file cannot be renamed (moved) to the + /// final destination location. This state is reported by the validation host (not by the + /// worker). RenameTmpFileErr(String), } @@ -81,15 +83,16 @@ impl fmt::Display for PrepareError { /// Some internal error occurred. /// -/// Should only ever be used for validation errors independent of the candidate and PVF, or for errors we ruled out -/// during pre-checking (so preparation errors are fine). +/// Should only ever be used for validation errors independent of the candidate and PVF, or for +/// errors we ruled out during pre-checking (so preparation errors are fine). #[derive(Debug, Clone, Encode, Decode)] pub enum InternalValidationError { /// Some communication error occurred with the host. HostCommunication(String), /// Could not find or open compiled artifact file. CouldNotOpenFile(String), - /// An error occurred in the CPU time monitor thread. Should be totally unrelated to validation. + /// An error occurred in the CPU time monitor thread. Should be totally unrelated to + /// validation. CpuTimeMonitorThread(String), /// Some non-deterministic preparation error occurred. NonDeterministicPrepareError(PrepareError), diff --git a/node/core/pvf/common/src/executor_intf.rs b/node/core/pvf/common/src/executor_intf.rs index ef74e5f2ca92..42ed4b79c761 100644 --- a/node/core/pvf/common/src/executor_intf.rs +++ b/node/core/pvf/common/src/executor_intf.rs @@ -35,10 +35,10 @@ use std::any::{Any, TypeId}; // left for the stack; this is, of course, overridable at link time when compiling the runtime) // plus the number of pages specified in the `extra_heap_pages` passed to the executor. // -// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16 pages. -// The data section for runtimes are typically rather small and can fit in a single digit number of -// WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB are used for -// these needs by default. +// By default, rustc (or `lld` specifically) should allocate 1 MiB for the shadow stack, or 16 +// pages. The data section for runtimes are typically rather small and can fit in a single digit +// number of WASM pages, so let's say an extra 16 pages. Thus let's assume that 32 pages or 2 MiB +// are used for these needs by default. const DEFAULT_HEAP_PAGES_ESTIMATE: u32 = 32; const EXTRA_HEAP_PAGES: u32 = 2048; @@ -65,9 +65,9 @@ pub const DEFAULT_CONFIG: Config = Config { // // Here is how the values below were chosen. // - // At the moment of writing, the default native stack size limit is 1 MiB. Assuming a logical item - // (see the docs about the field and the instrumentation algorithm) is 8 bytes, 1 MiB can - // fit 2x 65536 logical items. + // At the moment of writing, the default native stack size limit is 1 MiB. Assuming a + // logical item (see the docs about the field and the instrumentation algorithm) is 8 bytes, + // 1 MiB can fit 2x 65536 logical items. // // Since reaching the native stack limit is undesirable, we halve the logical item limit and // also increase the native 256x. 
This hopefully should preclude wasm code from reaching @@ -113,7 +113,7 @@ pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result sem.wasm_bulk_memory = true, // TODO: Not implemented yet; . ExecutorParam::PrecheckingMaxMemory(_) => (), - ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), // Not used here + ExecutorParam::PvfPrepTimeout(_, _) | ExecutorParam::PvfExecTimeout(_, _) => (), /* Not used here */ } } sem.deterministic_stack_limit = Some(stack_limit); @@ -135,8 +135,8 @@ impl Executor { Ok(Self { config }) } - /// Executes the given PVF in the form of a compiled artifact and returns the result of execution - /// upon success. + /// Executes the given PVF in the form of a compiled artifact and returns the result of + /// execution upon success. /// /// # Safety /// diff --git a/node/core/pvf/common/src/worker/mod.rs b/node/core/pvf/common/src/worker/mod.rs index 8dd99fc762d8..d9a0dff71b24 100644 --- a/node/core/pvf/common/src/worker/mod.rs +++ b/node/core/pvf/common/src/worker/mod.rs @@ -251,9 +251,9 @@ pub mod thread { Arc::new((Mutex::new(WaitOutcome::Pending), Condvar::new())) } - /// Runs a worker thread. Will first enable security features, and afterwards notify the threads waiting on the - /// condvar. Catches panics during execution and resumes the panics after triggering the condvar, so that the - /// waiting thread is notified on panics. + /// Runs a worker thread. Will first enable security features, and afterwards notify the threads + /// waiting on the condvar. Catches panics during execution and resumes the panics after + /// triggering the condvar, so that the waiting thread is notified on panics. /// /// # Returns /// diff --git a/node/core/pvf/execute-worker/src/lib.rs b/node/core/pvf/execute-worker/src/lib.rs index c6ee515f9093..6f632a0ae95e 100644 --- a/node/core/pvf/execute-worker/src/lib.rs +++ b/node/core/pvf/execute-worker/src/lib.rs @@ -239,7 +239,8 @@ pub fn worker_entrypoint( WaitOutcome::TimedOut => { match cpu_time_monitor_thread.join() { Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't finished. + // Log if we exceed the timeout and the other thread hasn't + // finished. gum::warn!( target: LOG_TARGET, %worker_pid, diff --git a/node/core/pvf/prepare-worker/src/lib.rs b/node/core/pvf/prepare-worker/src/lib.rs index 3f60163c6196..caa7d33df12a 100644 --- a/node/core/pvf/prepare-worker/src/lib.rs +++ b/node/core/pvf/prepare-worker/src/lib.rs @@ -190,8 +190,9 @@ pub fn worker_entrypoint( // If we are pre-checking, check for runtime construction errors. // - // As pre-checking is more strict than just preparation in terms of memory and - // time, it is okay to do extra checks here. This takes negligible time anyway. + // As pre-checking is more strict than just preparation in terms of memory + // and time, it is okay to do extra checks here. This takes negligible time + // anyway. if let PrepareJobKind::Prechecking = prepare_job_kind { result = result.and_then(|output| { runtime_construction_check(output.0.as_ref(), executor_params)?; @@ -253,10 +254,11 @@ pub fn worker_entrypoint( // Write the serialized artifact into a temp file. // - // PVF host only keeps artifacts statuses in its memory, successfully - // compiled code gets stored on the disk (and consequently deserialized - // by execute-workers). The prepare worker is only required to send `Ok` - // to the pool to indicate the success. 
+ // PVF host only keeps artifacts statuses in its memory, + // successfully compiled code gets stored on the disk (and + // consequently deserialized by execute-workers). The prepare worker + // is only required to send `Ok` to the pool to indicate the + // success. gum::debug!( target: LOG_TARGET, @@ -275,7 +277,8 @@ pub fn worker_entrypoint( WaitOutcome::TimedOut => { match cpu_time_monitor_thread.join() { Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't finished. + // Log if we exceed the timeout and the other thread hasn't + // finished. gum::warn!( target: LOG_TARGET, %worker_pid, diff --git a/node/core/pvf/prepare-worker/src/memory_stats.rs b/node/core/pvf/prepare-worker/src/memory_stats.rs index e6dc8572c4a3..7904dfa9cb88 100644 --- a/node/core/pvf/prepare-worker/src/memory_stats.rs +++ b/node/core/pvf/prepare-worker/src/memory_stats.rs @@ -83,8 +83,8 @@ pub mod memory_tracker { /// /// # Errors /// - /// For simplicity, any errors are returned as a string. As this is not a critical component, errors - /// are used for informational purposes (logging) only. + /// For simplicity, any errors are returned as a string. As this is not a critical component, + /// errors are used for informational purposes (logging) only. pub fn memory_tracker_loop(condvar: thread::Cond) -> Result { // NOTE: This doesn't need to be too fine-grained since preparation currently takes 3-10s or // more. Apart from that, there is not really a science to this number. diff --git a/node/core/pvf/src/artifacts.rs b/node/core/pvf/src/artifacts.rs index 78d2f88941b8..a180af15db27 100644 --- a/node/core/pvf/src/artifacts.rs +++ b/node/core/pvf/src/artifacts.rs @@ -224,7 +224,8 @@ impl Artifacts { .is_none()); } - /// Remove and retrieve the artifacts from the table that are older than the supplied Time-To-Live. + /// Remove and retrieve the artifacts from the table that are older than the supplied + /// Time-To-Live. pub fn prune(&mut self, artifact_ttl: Duration) -> Vec { let now = SystemTime::now(); diff --git a/node/core/pvf/src/error.rs b/node/core/pvf/src/error.rs index 7372cd233c49..cb35ec9e9d9a 100644 --- a/node/core/pvf/src/error.rs +++ b/node/core/pvf/src/error.rs @@ -38,29 +38,30 @@ pub enum InvalidCandidate { /// The worker has died during validation of a candidate. That may fall in one of the following /// categories, which we cannot distinguish programmatically: /// - /// (a) Some sort of transient glitch caused the worker process to abort. An example would be that - /// the host machine ran out of free memory and the OOM killer started killing the processes, - /// and in order to save the parent it will "sacrifice child" first. + /// (a) Some sort of transient glitch caused the worker process to abort. An example would be + /// that the host machine ran out of free memory and the OOM killer started killing the + /// processes, and in order to save the parent it will "sacrifice child" first. /// /// (b) The candidate triggered a code path that has lead to the process death. For example, - /// the PVF found a way to consume unbounded amount of resources and then it either exceeded - /// an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a bug in - /// wasmtime allowed the PVF to gain control over the execution worker. + /// the PVF found a way to consume unbounded amount of resources and then it either + /// exceeded an `rlimit` (if set) or, again, invited OOM killer. 
Another possibility is a + /// bug in wasmtime allowed the PVF to gain control over the execution worker. /// /// We attribute such an event to an *invalid candidate* in either case. /// /// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single - /// validator. If the glitch is somewhat more persistent the validator will reject all candidate - /// thrown at it and hopefully the operator notices it by decreased reward performance of the - /// validator. On the other hand, if the worker died because of (b) we would have better chances - /// to stop the attack. + /// validator. If the glitch is somewhat more persistent the validator will reject all + /// candidate thrown at it and hopefully the operator notices it by decreased reward + /// performance of the validator. On the other hand, if the worker died because of (b) we would + /// have better chances to stop the attack. AmbiguousWorkerDeath, /// PVF execution (compilation is not included) took more time than was allotted. HardTimeout, - /// A panic occurred and we can't be sure whether the candidate is really invalid or some internal glitch occurred. - /// Whenever we are unsure, we can never treat an error as internal as we would abstain from voting. This is bad - /// because if the issue was due to the candidate, then all validators would abstain, stalling finality on the - /// chain. So we will first retry the candidate, and if the issue persists we are forced to vote invalid. + /// A panic occurred and we can't be sure whether the candidate is really invalid or some + /// internal glitch occurred. Whenever we are unsure, we can never treat an error as internal + /// as we would abstain from voting. This is bad because if the issue was due to the candidate, + /// then all validators would abstain, stalling finality on the chain. So we will first retry + /// the candidate, and if the issue persists we are forced to vote invalid. Panic(String), } diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs index 33a1c6f89709..acb260e25693 100644 --- a/node/core/pvf/src/execute/queue.rs +++ b/node/core/pvf/src/execute/queue.rs @@ -419,7 +419,8 @@ fn spawn_extra_worker(queue: &mut Queue, job: ExecuteJob) { /// beforehand. In such a way, a race condition is avoided: during the worker being spawned, /// another job in the queue, with an incompatible execution environment, may become stale, and /// the queue would have to kill a newly started worker and spawn another one. -/// Nevertheless, if the worker finishes executing the job, it becomes idle and may be used to execute other jobs with a compatible execution environment. +/// Nevertheless, if the worker finishes executing the job, it becomes idle and may be used to +/// execute other jobs with a compatible execution environment. async fn spawn_worker_task( program_path: PathBuf, job: ExecuteJob, diff --git a/node/core/pvf/src/execute/worker_intf.rs b/node/core/pvf/src/execute/worker_intf.rs index 9d8b61d10447..948abd2261d7 100644 --- a/node/core/pvf/src/execute/worker_intf.rs +++ b/node/core/pvf/src/execute/worker_intf.rs @@ -74,8 +74,9 @@ pub enum Outcome { /// PVF execution completed successfully and the result is returned. The worker is ready for /// another job. Ok { result_descriptor: ValidationResult, duration: Duration, idle_worker: IdleWorker }, - /// The candidate validation failed. It may be for example because the wasm execution triggered a trap. 
- /// Errors related to the preparation process are not expected to be encountered by the execution workers. + /// The candidate validation failed. It may be for example because the wasm execution triggered + /// a trap. Errors related to the preparation process are not expected to be encountered by the + /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. @@ -95,7 +96,8 @@ pub enum Outcome { /// Given the idle token of a worker and parameters of work, communicates with the worker and /// returns the outcome. /// -/// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being killed. +/// NOTE: Not returning the idle worker token in `Outcome` will trigger the child process being +/// killed. pub async fn start_work( worker: IdleWorker, artifact: ArtifactPathId, diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index a5772e34e16e..9f3b7e23fd89 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -455,8 +455,8 @@ async fn handle_precheck_pvf( ArtifactState::Preparing { waiting_for_response, num_failures: _ } => waiting_for_response.push(result_sender), ArtifactState::FailedToProcess { error, .. } => { - // Do not retry failed preparation if another pre-check request comes in. We do not retry pre-checking, - // anyway. + // Do not retry failed preparation if another pre-check request comes in. We do not + // retry pre-checking, anyway. let _ = result_sender.send(PrepareResult::Err(error.clone())); }, } @@ -470,8 +470,8 @@ async fn handle_precheck_pvf( /// Handles PVF execution. /// -/// This will try to prepare the PVF, if a prepared artifact does not already exist. If there is already a -/// preparation job, we coalesce the two preparation jobs. +/// This will try to prepare the PVF, if a prepared artifact does not already exist. If there is +/// already a preparation job, we coalesce the two preparation jobs. /// /// If the prepare job succeeded previously, we will enqueue an execute job right away. /// @@ -521,7 +521,8 @@ async fn handle_execute_pvf( "handle_execute_pvf: Re-queuing PVF preparation for prepared artifact with missing file." ); - // The artifact has been prepared previously but the file is missing, prepare it again. + // The artifact has been prepared previously but the file is missing, prepare it + // again. *state = ArtifactState::Preparing { waiting_for_response: Vec::new(), num_failures: 0, @@ -721,8 +722,8 @@ async fn handle_prepare_done( pending_requests { if result_tx.is_canceled() { - // Preparation could've taken quite a bit of time and the requester may be not interested - // in execution anymore, in which case we just skip the request. + // Preparation could've taken quite a bit of time and the requester may be not + // interested in execution anymore, in which case we just skip the request. continue } @@ -855,8 +856,8 @@ fn can_retry_prepare_after_failure( return false } - // Retry if the retry cooldown has elapsed and if we have already retried less than `NUM_PREPARE_RETRIES` times. IO - // errors may resolve themselves. + // Retry if the retry cooldown has elapsed and if we have already retried less than + // `NUM_PREPARE_RETRIES` times. IO errors may resolve themselves. 
SystemTime::now() >= last_time_failed + PREPARE_FAILURE_COOLDOWN && num_failures <= NUM_PREPARE_RETRIES } diff --git a/node/core/pvf/src/lib.rs b/node/core/pvf/src/lib.rs index eb6ab39ac500..1da0593835fb 100644 --- a/node/core/pvf/src/lib.rs +++ b/node/core/pvf/src/lib.rs @@ -32,26 +32,26 @@ //! (a) PVF pre-checking. This takes the `Pvf` code and tries to prepare it (verify and //! compile) in order to pre-check its validity. //! -//! (b) PVF execution. This accepts the PVF [`params`][`polkadot_parachain::primitives::ValidationParams`] -//! and the `Pvf` code, prepares (verifies and compiles) the code, and then executes PVF -//! with the `params`. +//! (b) PVF execution. This accepts the PVF +//! [`params`][`polkadot_parachain::primitives::ValidationParams`] and the `Pvf` code, prepares +//! (verifies and compiles) the code, and then executes PVF with the `params`. //! //! (c) Heads up. This request allows to signal that the given PVF may be needed soon and that it //! should be prepared for execution. //! -//! The preparation results are cached for some time after they either used or was signaled in heads up. -//! All requests that depends on preparation of the same PVF are bundled together and will be executed -//! as soon as the artifact is prepared. +//! The preparation results are cached for some time after they either used or was signaled in heads +//! up. All requests that depends on preparation of the same PVF are bundled together and will be +//! executed as soon as the artifact is prepared. //! //! # Priority //! -//! PVF execution requests can specify the [priority][`Priority`] with which the given request should -//! be handled. Different priority levels have different effects. This is discussed below. +//! PVF execution requests can specify the [priority][`Priority`] with which the given request +//! should be handled. Different priority levels have different effects. This is discussed below. //! //! Preparation started by a heads up signal always starts with the background priority. If there -//! is already a request for that PVF preparation under way the priority is inherited. If after heads -//! up, a new PVF execution request comes in with a higher priority, then the original task's priority -//! will be adjusted to match the new one if it's larger. +//! is already a request for that PVF preparation under way the priority is inherited. If after +//! heads up, a new PVF execution request comes in with a higher priority, then the original task's +//! priority will be adjusted to match the new one if it's larger. //! //! Priority can never go down, only up. //! @@ -63,11 +63,11 @@ //! dissimilar to actors. Each of such "processes" is a future task that contains an event loop that //! processes incoming messages, potentially delegating sub-tasks to other "processes". //! -//! Two of these processes are queues. The first one is for preparation jobs and the second one is for -//! execution. Both of the queues are backed by separate pools of workers of different kind. +//! Two of these processes are queues. The first one is for preparation jobs and the second one is +//! for execution. Both of the queues are backed by separate pools of workers of different kind. //! -//! Preparation workers handle preparation requests by prevalidating and instrumenting PVF wasm code, -//! and then passing it into the compiler, to prepare the artifact. +//! Preparation workers handle preparation requests by prevalidating and instrumenting PVF wasm +//! 
code, and then passing it into the compiler, to prepare the artifact. //! //! ## Artifacts //! diff --git a/node/core/pvf/src/metrics.rs b/node/core/pvf/src/metrics.rs index 62f8c6dc5157..3d792793498b 100644 --- a/node/core/pvf/src/metrics.rs +++ b/node/core/pvf/src/metrics.rs @@ -85,7 +85,8 @@ impl Metrics { #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] if let Some(tracker_stats) = memory_stats.memory_tracker_stats { - // We convert these stats from B to KB to match the unit of `ru_maxrss` from `getrusage`. + // We convert these stats from B to KB to match the unit of `ru_maxrss` from + // `getrusage`. let max_resident_kb = (tracker_stats.resident / 1024) as f64; let max_allocated_kb = (tracker_stats.allocated / 1024) as f64; diff --git a/node/core/pvf/src/prepare/pool.rs b/node/core/pvf/src/prepare/pool.rs index 1e8ccc7365bf..92aa4896c263 100644 --- a/node/core/pvf/src/prepare/pool.rs +++ b/node/core/pvf/src/prepare/pool.rs @@ -61,9 +61,9 @@ pub enum ToPool { /// Request the given worker to start working on the given code. /// - /// Once the job either succeeded or failed, a [`FromPool::Concluded`] message will be sent back. - /// It's also possible that the worker dies before handling the message in which case [`FromPool::Rip`] - /// will be sent back. + /// Once the job either succeeded or failed, a [`FromPool::Concluded`] message will be sent + /// back. It's also possible that the worker dies before handling the message in which case + /// [`FromPool::Rip`] will be sent back. /// /// In either case, the worker is considered busy and no further `StartWork` messages should be /// sent until either `Concluded` or `Rip` message is received. @@ -237,8 +237,8 @@ fn handle_to_pool( ); } else { // idle token is present after spawn and after a job is concluded; - // the precondition for `StartWork` is it should be sent only if all previous work - // items concluded; + // the precondition for `StartWork` is it should be sent only if all previous + // work items concluded; // thus idle token is Some; // qed. never!("unexpected absence of the idle token in prepare pool"); @@ -311,7 +311,8 @@ fn handle_mux( match outcome { Outcome::Concluded { worker: idle, result } => handle_concluded_no_rip(from_pool, spawned, worker, idle, result), - // Return `Concluded`, but do not kill the worker since the error was on the host side. + // Return `Concluded`, but do not kill the worker since the error was on the host + // side. Outcome::CreateTmpFileErr { worker: idle, err } => handle_concluded_no_rip( from_pool, spawned, @@ -319,7 +320,8 @@ fn handle_mux( idle, Err(PrepareError::CreateTmpFileErr(err)), ), - // Return `Concluded`, but do not kill the worker since the error was on the host side. + // Return `Concluded`, but do not kill the worker since the error was on the host + // side. Outcome::RenameTmpFileErr { worker: idle, result: _, err } => handle_concluded_no_rip( from_pool, diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index 5e19a4c7217a..c38012d74548 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -96,8 +96,9 @@ impl WorkerData { } } -/// A queue structured like this is prone to starving, however, we don't care that much since we expect -/// there is going to be a limited number of critical jobs and we don't really care if background starve. 
+/// A queue structured like this is prone to starving, however, we don't care that much since we +/// expect there is going to be a limited number of critical jobs and we don't really care if +/// background starve. #[derive(Default)] struct Unscheduled { normal: VecDeque, diff --git a/node/core/pvf/src/prepare/worker_intf.rs b/node/core/pvf/src/prepare/worker_intf.rs index d0d9a026dda7..5280ab6b42a2 100644 --- a/node/core/pvf/src/prepare/worker_intf.rs +++ b/node/core/pvf/src/prepare/worker_intf.rs @@ -247,8 +247,8 @@ where let outcome = f(tmp_file.clone(), stream).await; - // The function called above is expected to move `tmp_file` to a new location upon success. However, - // the function may as well fail and in that case we should remove the tmp file here. + // The function called above is expected to move `tmp_file` to a new location upon success. + // However, the function may as well fail and in that case we should remove the tmp file here. // // In any case, we try to remove the file here so that there are no leftovers. We only report // errors that are different from the `NotFound`. diff --git a/node/core/pvf/src/worker_intf.rs b/node/core/pvf/src/worker_intf.rs index ef5733ec0e6d..795ad4524443 100644 --- a/node/core/pvf/src/worker_intf.rs +++ b/node/core/pvf/src/worker_intf.rs @@ -196,13 +196,15 @@ pub enum SpawnErr { Handshake, } -/// This is a representation of a potentially running worker. Drop it and the process will be killed. +/// This is a representation of a potentially running worker. Drop it and the process will be +/// killed. /// /// A worker's handle is also a future that resolves when it's detected that the worker's process /// has been terminated. Since the worker is running in another process it is obviously not /// necessary to poll this future to make the worker run, it's only for termination detection. /// -/// This future relies on the fact that a child process's stdout `fd` is closed upon it's termination. +/// This future relies on the fact that a child process's stdout `fd` is closed upon it's +/// termination. #[pin_project] pub struct WorkerHandle { child: process::Child, @@ -240,15 +242,15 @@ impl WorkerHandle { child_id, stdout, program: program.as_ref().to_path_buf(), - // We don't expect the bytes to be ever read. But in case we do, we should not use a buffer - // of a small size, because otherwise if the child process does return any data we will end up - // issuing a syscall for each byte. We also prefer not to do allocate that on the stack, since - // each poll the buffer will be allocated and initialized (and that's due `poll_read` takes &mut [u8] - // and there are no guarantees that a `poll_read` won't ever read from there even though that's - // unlikely). + // We don't expect the bytes to be ever read. But in case we do, we should not use a + // buffer of a small size, because otherwise if the child process does return any data + // we will end up issuing a syscall for each byte. We also prefer not to do allocate + // that on the stack, since each poll the buffer will be allocated and initialized (and + // that's due `poll_read` takes &mut [u8] and there are no guarantees that a `poll_read` + // won't ever read from there even though that's unlikely). // - // OTOH, we also don't want to be super smart here and we could just afford to allocate a buffer - // for that here. + // OTOH, we also don't want to be super smart here and we could just afford to allocate + // a buffer for that here. 
drop_box: vec![0; 8192].into_boxed_slice(), }) } @@ -280,8 +282,8 @@ impl futures::Future for WorkerHandle { } }, Err(err) => { - // The implementation is guaranteed to not to return `WouldBlock` and Interrupted. This - // leaves us with legit errors which we suppose were due to termination. + // The implementation is guaranteed to not to return `WouldBlock` and Interrupted. + // This leaves us with legit errors which we suppose were due to termination. // Log the status code. gum::debug!( diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 252bb21b0edb..0ee5ca24ceee 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -321,7 +321,8 @@ where return futures::pending!() } - // If there are active requests, this will always resolve to `Some(_)` when a request is finished. + // If there are active requests, this will always resolve to `Some(_)` when a request is + // finished. if let Some(Ok(Some(result))) = self.active_requests.next().await { self.store_cache(result); } @@ -343,10 +344,10 @@ where { loop { // Let's add some back pressure when the subsystem is running at `MAX_PARALLEL_REQUESTS`. - // This can never block forever, because `active_requests` is owned by this task and any mutations - // happen either in `poll_requests` or `spawn_request` - so if `is_busy` returns true, then - // even if all of the requests finish before us calling `poll_requests` the `active_requests` length - // remains invariant. + // This can never block forever, because `active_requests` is owned by this task and any + // mutations happen either in `poll_requests` or `spawn_request` - so if `is_busy` returns + // true, then even if all of the requests finish before us calling `poll_requests` the + // `active_requests` length remains invariant. if subsystem.is_busy() { // Since we are not using any internal waiting queues, we need to wait for exactly // one request to complete before we can read the next one from the overseer channel. diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 27090a102ec2..33f5eef3869f 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -895,7 +895,8 @@ fn multiple_requests_in_parallel_are_working() { receivers.push(rx); } - // The backpressure from reaching `MAX_PARALLEL_REQUESTS` will make the test block, we need to drop the lock. + // The backpressure from reaching `MAX_PARALLEL_REQUESTS` will make the test block, we need + // to drop the lock. drop(lock); for _ in 0..MAX_PARALLEL_REQUESTS * 100 { diff --git a/node/gum/src/lib.rs b/node/gum/src/lib.rs index e989a15ae4e3..1cc4d8dec1cb 100644 --- a/node/gum/src/lib.rs +++ b/node/gum/src/lib.rs @@ -67,14 +67,13 @@ //! //! Here's the rundown on how fields work: //! -//! - Fields on spans and events are specified using the `syntax field_name = -//! field_value`. -//! - Local variables may be used as field values without an assignment, similar to -//! struct initializers. -//! - The `?` sigil is shorthand that specifies a field should be recorded using its -//! `fmt::Debug` implementation. -//! - The `%` sigil operates similarly, but indicates that the value should be -//! recorded using its `fmt::Display` implementation. +//! - Fields on spans and events are specified using the `syntax field_name = field_value`. +//! - Local variables may be used as field values without an assignment, similar to struct +//! initializers. +//! 
- The `?` sigil is shorthand that specifies a field should be recorded using its `fmt::Debug` +//! implementation. +//! - The `%` sigil operates similarly, but indicates that the value should be recorded using its +//! `fmt::Display` implementation. //! //! For full details, again see [the tracing //! docs](https://docs.rs/tracing/latest/tracing/index.html#recording-fields). diff --git a/node/jaeger/src/lib.rs b/node/jaeger/src/lib.rs index 99222589d4ab..7de458606816 100644 --- a/node/jaeger/src/lib.rs +++ b/node/jaeger/src/lib.rs @@ -132,7 +132,8 @@ impl Jaeger { match tokio::net::UdpSocket::bind("0.0.0.0:0").await { Ok(udp_socket) => loop { let buf = traces_out.next().await; - // UDP sending errors happen only either if the API is misused or in case of missing privilege. + // UDP sending errors happen only either if the API is misused or in case of + // missing privilege. if let Err(e) = udp_socket.send_to(&buf, jaeger_agent).await { log::debug!(target: "jaeger", "UDP send error: {}", e); } diff --git a/node/jaeger/src/spans.rs b/node/jaeger/src/spans.rs index be8bf9cd5ddc..4038d41344f2 100644 --- a/node/jaeger/src/spans.rs +++ b/node/jaeger/src/spans.rs @@ -110,8 +110,8 @@ impl PerLeafSpan { /// Creates a new instance. /// /// Takes the `leaf_span` that is created by the overseer per leaf and a name for a child span. - /// Both will be stored in this object, while the child span is implicitly accessible by using the - /// [`Deref`](std::ops::Deref) implementation. + /// Both will be stored in this object, while the child span is implicitly accessible by using + /// the [`Deref`](std::ops::Deref) implementation. pub fn new(leaf_span: Arc, name: &'static str) -> Self { let span = leaf_span.child(name); diff --git a/node/malus/src/variants/common.rs b/node/malus/src/variants/common.rs index 4ea8b88b56a5..ab1dfbbb360a 100644 --- a/node/malus/src/variants/common.rs +++ b/node/malus/src/variants/common.rs @@ -125,8 +125,8 @@ where Self { fake_validation, fake_validation_error, distribution, spawner } } - /// Creates and sends the validation response for a given candidate. Queries the runtime to obtain the validation data for the - /// given candidate. + /// Creates and sends the validation response for a given candidate. Queries the runtime to + /// obtain the validation data for the given candidate. pub fn send_validation_response( &self, candidate_descriptor: CandidateDescriptor, @@ -203,7 +203,8 @@ where { type Message = CandidateValidationMessage; - // Capture all (approval and backing) candidate validation requests and depending on configuration fail them. + // Capture all (approval and backing) candidate validation requests and depending on + // configuration fail them. fn intercept_incoming( &self, subsystem_sender: &mut Sender, @@ -279,7 +280,8 @@ where }, FakeCandidateValidation::ApprovalInvalid | FakeCandidateValidation::BackingAndApprovalInvalid => { - // Set the validation result to invalid with probability `p` and trigger a dispute + // Set the validation result to invalid with probability `p` and trigger a + // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); match behave_maliciously { true => { @@ -294,7 +296,8 @@ where &validation_result, ); - // We're not even checking the candidate, this makes us appear faster than honest validators. + // We're not even checking the candidate, this makes us appear + // faster than honest validators. 
sender.send(Ok(validation_result)).unwrap(); None }, @@ -370,7 +373,8 @@ where ); None }, - // If the `PoV` is malicious, we behave normally with some probability `(1-p)` + // If the `PoV` is malicious, we behave normally with some probability + // `(1-p)` false => Some(FromOrchestra::Communication { msg: CandidateValidationMessage::ValidateFromChainState( candidate_receipt, @@ -383,7 +387,8 @@ where }, FakeCandidateValidation::BackingInvalid | FakeCandidateValidation::BackingAndApprovalInvalid => { - // Maliciously set the validation result to invalid for a valid candidate with probability `p` + // Maliciously set the validation result to invalid for a valid candidate + // with probability `p` let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); match behave_maliciously { true => { @@ -396,7 +401,8 @@ where "😈 Maliciously sending invalid validation result: {:?}.", &validation_result, ); - // We're not even checking the candidate, this makes us appear faster than honest validators. + // We're not even checking the candidate, this makes us appear + // faster than honest validators. response_sender.send(Ok(validation_result)).unwrap(); None }, diff --git a/node/malus/src/variants/dispute_valid_candidates.rs b/node/malus/src/variants/dispute_valid_candidates.rs index ab1fba478beb..9ea8449a1d0b 100644 --- a/node/malus/src/variants/dispute_valid_candidates.rs +++ b/node/malus/src/variants/dispute_valid_candidates.rs @@ -45,14 +45,15 @@ use std::sync::Arc; #[command(rename_all = "kebab-case")] #[allow(missing_docs)] pub struct DisputeAncestorOptions { - /// Malicious candidate validation subsystem configuration. When enabled, node PVF execution is skipped - /// during backing and/or approval and it's result can by specified by this option and `--fake-validation-error` - /// for invalid candidate outcomes. + /// Malicious candidate validation subsystem configuration. When enabled, node PVF execution is + /// skipped during backing and/or approval and it's result can by specified by this option and + /// `--fake-validation-error` for invalid candidate outcomes. #[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidation::BackingAndApprovalInvalid)] pub fake_validation: FakeCandidateValidation, - /// Applies only when `--fake-validation` is configured to reject candidates as invalid. It allows - /// to specify the exact error to return from the malicious candidate validation subsystem. + /// Applies only when `--fake-validation` is configured to reject candidates as invalid. It + /// allows to specify the exact error to return from the malicious candidate validation + /// subsystem. #[arg(long, value_enum, ignore_case = true, default_value_t = FakeCandidateValidationError::InvalidOutputs)] pub fake_validation_error: FakeCandidateValidationError, diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs index 9fd8f6473bde..7d301c194b44 100644 --- a/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/node/malus/src/variants/suggest_garbage_candidate.rs @@ -88,14 +88,15 @@ where "Received request to second candidate", ); - // Need to draw value from Bernoulli distribution with given probability of success defined by the clap parameter. - // Note that clap parameter must be f64 since this is expected by the Bernoulli::new() function. - // It must be converted from u8, due to the lack of support for the .range() call on u64 in the clap crate. 
+ // Need to draw value from Bernoulli distribution with given probability of success + // defined by the clap parameter. Note that clap parameter must be f64 since this is + // expected by the Bernoulli::new() function. It must be converted from u8, due to + // the lack of support for the .range() call on u64 in the clap crate. let distribution = Bernoulli::new(self.percentage / 100.0) .expect("Invalid probability! Percentage must be in range [0..=100]."); - // Draw a random boolean from the Bernoulli distribution with probability of true equal to `p`. - // We use `rand::thread_rng` as the source of randomness. + // Draw a random boolean from the Bernoulli distribution with probability of true + // equal to `p`. We use `rand::thread_rng` as the source of randomness. let generate_malicious_candidate = distribution.sample(&mut rand::thread_rng()); if generate_malicious_candidate == true { diff --git a/node/metrics/src/lib.rs b/node/metrics/src/lib.rs index 69b3771d696a..9cb0f289a580 100644 --- a/node/metrics/src/lib.rs +++ b/node/metrics/src/lib.rs @@ -19,7 +19,8 @@ //! Collects a bunch of metrics providers and related features such as //! `Metronome` for usage with metrics collections. //! -//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems. +//! This crate also reexports Prometheus metric types which are expected to be implemented by +//! subsystems. #![deny(missing_docs)] #![deny(unused_imports)] diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index bc85f54177cb..803a56251495 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -102,11 +102,13 @@ impl RecentlyOutdated { // Aggression has 3 levels: // // * Aggression Level 0: The basic behaviors described above. -// * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the rules above. -// * Aggression Level 2: All peers send all messages to all their row and column neighbors. -// This means that each validator will, on average, receive each message approximately `2*sqrt(n)` times. -// The aggression level of messages pertaining to a block increases when that block is unfinalized and -// is a child of the finalized block. +// * Aggression Level 1: The originator of a message sends to all peers. Other peers follow the +// rules above. +// * Aggression Level 2: All peers send all messages to all their row and column neighbors. This +// means that each validator will, on average, receive each message approximately `2*sqrt(n)` +// times. +// The aggression level of messages pertaining to a block increases when that block is unfinalized +// and is a child of the finalized block. // This means that only one block at a time has its messages propagated with aggression > 0. // // A note on aggression thresholds: changes in propagation apply only to blocks which are the @@ -120,7 +122,8 @@ impl RecentlyOutdated { struct AggressionConfig { /// Aggression level 1: all validators send all their own messages to all peers. l1_threshold: Option, - /// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y dimensions. + /// Aggression level 2: level 1 + all validators send all messages to all peers in the X and Y + /// dimensions. l2_threshold: Option, /// How often to re-send messages to all targeted recipients. /// This applies to all unfinalized blocks. 
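The suggest_garbage_candidate hunk above describes how the malicious variant decides whether to misbehave: a clap-provided integer percentage is converted to f64 and fed into a Bernoulli distribution, which is sampled once per candidate. A minimal sketch of that sampling pattern, assuming the rand crate's Bernoulli type; the function name and signature are illustrative, not the actual malus code:

use rand::{distributions::{Bernoulli, Distribution}, thread_rng};

/// Returns `true` with probability `percentage / 100`.
fn should_misbehave(percentage: u8) -> bool {
    // `Bernoulli::new` expects an f64 probability in [0.0, 1.0], hence the conversion
    // from the u8 accepted on the command line.
    let distribution = Bernoulli::new(f64::from(percentage) / 100.0)
        .expect("Invalid probability! Percentage must be in range [0..=100].");
    distribution.sample(&mut thread_rng())
}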
@@ -167,11 +170,12 @@ struct State { blocks: HashMap, /// Our view updates to our peers can race with `NewBlocks` updates. We store messages received - /// against the directly mentioned blocks in our view in this map until `NewBlocks` is received. + /// against the directly mentioned blocks in our view in this map until `NewBlocks` is + /// received. /// - /// As long as the parent is already in the `blocks` map and `NewBlocks` messages aren't delayed - /// by more than a block length, this strategy will work well for mitigating the race. This is - /// also a race that occurs typically on local networks. + /// As long as the parent is already in the `blocks` map and `NewBlocks` messages aren't + /// delayed by more than a block length, this strategy will work well for mitigating the race. + /// This is also a race that occurs typically on local networks. pending_known: HashMap>, /// Peer data is partially stored here, and partially inline within the [`BlockEntry`]s @@ -947,7 +951,8 @@ impl State { } } - // Invariant: to our knowledge, none of the peers except for the `source` know about the assignment. + // Invariant: to our knowledge, none of the peers except for the `source` know about the + // assignment. metrics.on_assignment_imported(); let topology = self.topologies.get_topology(entry.session); @@ -1239,7 +1244,8 @@ impl State { } } - // Invariant: to our knowledge, none of the peers except for the `source` know about the approval. + // Invariant: to our knowledge, none of the peers except for the `source` know about the + // approval. metrics.on_approval_imported(); let required_routing = match entry.candidates.get_mut(candidate_index as usize) { @@ -1925,9 +1931,9 @@ const fn ensure_size_not_zero(size: usize) -> usize { } /// The maximum amount of assignments per batch is 33% of maximum allowed by protocol. -/// This is an arbitrary value. Bumping this up increases the maximum amount of approvals or assignments -/// we send in a single message to peers. Exceeding `MAX_NOTIFICATION_SIZE` will violate the protocol -/// configuration. +/// This is an arbitrary value. Bumping this up increases the maximum amount of approvals or +/// assignments we send in a single message to peers. Exceeding `MAX_NOTIFICATION_SIZE` will violate +/// the protocol configuration. pub const MAX_ASSIGNMENT_BATCH_SIZE: usize = ensure_size_not_zero( MAX_NOTIFICATION_SIZE as usize / std::mem::size_of::<(IndirectAssignmentCert, CandidateIndex)>() / diff --git a/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/node/network/availability-distribution/src/requester/fetch_task/mod.rs index f87e1888bb10..191ee2acd973 100644 --- a/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -315,7 +315,8 @@ impl RunningTask { continue }, }; - // We drop the span so that the span is not active whilst we validate and store the chunk. + // We drop the span so that the span is not active whilst we validate and store the + // chunk. 
drop(_chunk_recombine_span); let _chunk_validate_and_store_span = span .child("validate-and-store-chunk") diff --git a/node/network/availability-distribution/src/requester/mod.rs b/node/network/availability-distribution/src/requester/mod.rs index e27f40982ae8..446988f7cc0d 100644 --- a/node/network/availability-distribution/src/requester/mod.rs +++ b/node/network/availability-distribution/src/requester/mod.rs @@ -114,8 +114,8 @@ impl Requester { .with_string_tag("leaf", format!("{:?}", leaf.hash)) .with_stage(jaeger::Stage::AvailabilityDistribution); - // Order important! We need to handle activated, prior to deactivated, otherwise we might - // cancel still needed jobs. + // Order important! We need to handle activated, prior to deactivated, otherwise we + // might cancel still needed jobs. self.start_requesting_chunks(ctx, runtime, leaf, &span).await?; } @@ -168,8 +168,8 @@ impl Requester { // any tasks separately. // // The next time the subsystem receives leaf update, some of spawned task will be bumped - // to be live in fresh relay parent, while some might get dropped due to the current leaf - // being deactivated. + // to be live in fresh relay parent, while some might get dropped due to the current + // leaf being deactivated. self.add_cores(ctx, runtime, leaf, leaf_session_index, cores, span).await?; } @@ -177,7 +177,6 @@ impl Requester { } /// Stop requesting chunks for obsolete heads. - /// fn stop_requesting_chunks(&mut self, obsolete_leaves: impl Iterator) { let obsolete_leaves: HashSet<_> = obsolete_leaves.collect(); self.fetches.retain(|_, task| { @@ -226,10 +225,10 @@ impl Requester { .with_session_info( context, runtime, - // We use leaf here, the relay_parent must be in the same session as the - // leaf. This is guaranteed by runtime which ensures that cores are cleared - // at session boundaries. At the same time, only leaves are guaranteed to - // be fetchable by the state trie. + // We use leaf here, the relay_parent must be in the same session as + // the leaf. This is guaranteed by runtime which ensures that cores are + // cleared at session boundaries. At the same time, only leaves are + // guaranteed to be fetchable by the state trie. leaf, leaf_session_index, |info| FetchTaskConfig::new(leaf, &core, tx, metrics, info, span), diff --git a/node/network/availability-recovery/src/futures_undead.rs b/node/network/availability-recovery/src/futures_undead.rs index 225f6693a725..04ef3e749399 100644 --- a/node/network/availability-recovery/src/futures_undead.rs +++ b/node/network/availability-recovery/src/futures_undead.rs @@ -23,7 +23,6 @@ //! was almost done, thus we would have wasted time with our impatience. By simply making them //! not count towards length, we can make sure to have enough "live" requests ongoing, while at the //! same time taking advantage of some maybe "late" response from the undead. -//! use std::{ pin::Pin, diff --git a/node/network/availability-recovery/src/lib.rs b/node/network/availability-recovery/src/lib.rs index e8503ee454a2..fb0cdb720571 100644 --- a/node/network/availability-recovery/src/lib.rs +++ b/node/network/availability-recovery/src/lib.rs @@ -111,7 +111,8 @@ const SMALL_POV_LIMIT: usize = 128 * 1024; pub enum RecoveryStrategy { /// We always try the backing group first, then fallback to validator chunks. BackersFirstAlways, - /// We try the backing group first if PoV size is lower than specified, then fallback to validator chunks. 
+ /// We try the backing group first if PoV size is lower than specified, then fallback to + /// validator chunks. BackersFirstIfSizeLower(usize), /// We always recover using validator chunks. ChunksAlways, @@ -132,7 +133,8 @@ impl RecoveryStrategy { } } - /// Returns the PoV size limit in bytes for `BackersFirstIfSizeLower` strategy, otherwise `None`. + /// Returns the PoV size limit in bytes for `BackersFirstIfSizeLower` strategy, otherwise + /// `None`. pub fn pov_size_limit(&self) -> Option { match *self { RecoveryStrategy::BackersFirstIfSizeLower(limit) => Some(limit), @@ -165,8 +167,8 @@ struct RequestChunksFromValidators { /// /// including failed ones. total_received_responses: usize, - /// a random shuffling of the validators which indicates the order in which we connect to the validators and - /// request the chunk from them. + /// a random shuffling of the validators which indicates the order in which we connect to the + /// validators and request the chunk from them. shuffling: VecDeque, /// Chunks received so far. received_chunks: HashMap, @@ -215,7 +217,8 @@ enum ErasureTask { HashMap, oneshot::Sender>, ), - /// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of the Merkle tree. + /// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of + /// the Merkle tree. Reencode(usize, Hash, AvailableData, oneshot::Sender>), } @@ -808,8 +811,8 @@ where self.params.metrics.on_recovery_started(); loop { - // These only fail if we cannot reach the underlying subsystem, which case there is nothing - // meaningful we can do. + // These only fail if we cannot reach the underlying subsystem, which case there is + // nothing meaningful we can do. match self.source { Source::RequestFromBackers(ref mut from_backers) => { match from_backers.run(&self.params, &mut self.sender).await { @@ -1008,7 +1011,8 @@ async fn launch_recovery_task( ); backing_group = backing_group.filter(|_| { - // We keep the backing group only if `1/3` of chunks sum up to less than `small_pov_limit`. + // We keep the backing group only if `1/3` of chunks sum up to less than + // `small_pov_limit`. prefer_backing_group }); } @@ -1194,18 +1198,21 @@ impl AvailabilityRecoverySubsystem { let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); let mut erasure_task_rx = erasure_task_rx.fuse(); - // `ThreadPoolBuilder` spawns the tasks using `spawn_blocking`. For each worker there will be a `mpsc` channel created. - // Each of these workers take the `Receiver` and poll it in an infinite loop. - // All of the sender ends of the channel are sent as a vec which we then use to create a `Cycle` iterator. - // We use this iterator to assign work in a round-robin fashion to the workers in the pool. + // `ThreadPoolBuilder` spawns the tasks using `spawn_blocking`. For each worker there will + // be a `mpsc` channel created. Each of these workers take the `Receiver` and poll it in an + // infinite loop. All of the sender ends of the channel are sent as a vec which we then use + // to create a `Cycle` iterator. We use this iterator to assign work in a round-robin + // fashion to the workers in the pool. 
// // How work is dispatched to the pool from the recovery tasks: - // - Once a recovery task finishes retrieving the availability data, it needs to reconstruct from chunks and/or + // - Once a recovery task finishes retrieving the availability data, it needs to reconstruct + // from chunks and/or // re-encode the data which are heavy CPU computations. - // To do so it sends an `ErasureTask` to the main loop via the `erasure_task` channel, and waits for the results - // over a `oneshot` channel. + // To do so it sends an `ErasureTask` to the main loop via the `erasure_task` channel, and + // waits for the results over a `oneshot` channel. // - In the subsystem main loop we poll the `erasure_task_rx` receiver. - // - We forward the received `ErasureTask` to the `next()` sender yielded by the `Cycle` iterator. + // - We forward the received `ErasureTask` to the `next()` sender yielded by the `Cycle` + // iterator. // - Some worker thread handles it and sends the response over the `oneshot` channel. // Create a thread pool with 2 workers. @@ -1348,11 +1355,13 @@ impl ThreadPoolBuilder { // Creates a pool of `size` workers, where 1 <= `size` <= `MAX_THREADS`. // // Each worker is created by `spawn_blocking` and takes the receiver side of a channel - // while all of the senders are returned to the caller. Each worker runs `erasure_task_thread` that - // polls the `Receiver` for an `ErasureTask` which is expected to be CPU intensive. The larger - // the input (more or larger chunks/availability data), the more CPU cycles will be spent. + // while all of the senders are returned to the caller. Each worker runs `erasure_task_thread` + // that polls the `Receiver` for an `ErasureTask` which is expected to be CPU intensive. The + // larger the input (more or larger chunks/availability data), the more CPU cycles will be + // spent. // - // For example, for 32KB PoVs, we'd expect re-encode to eat as much as 90ms and 500ms for 2.5MiB. + // For example, for 32KB PoVs, we'd expect re-encode to eat as much as 90ms and 500ms for + // 2.5MiB. // // After executing such a task, the worker sends the response via a provided `oneshot` sender. // diff --git a/node/network/availability-recovery/src/tests.rs b/node/network/availability-recovery/src/tests.rs index 26a99e91a5e2..c5647a12f589 100644 --- a/node/network/availability-recovery/src/tests.rs +++ b/node/network/availability-recovery/src/tests.rs @@ -817,7 +817,8 @@ fn wrong_chunk_index_leads_to_recovery_error() { let candidate_hash = test_state.candidate.hash(); - // These chunks should fail the index check as they don't have the correct index for validator. + // These chunks should fail the index check as they don't have the correct index for + // validator. test_state.chunks[1] = test_state.chunks[0].clone(); test_state.chunks[2] = test_state.chunks[0].clone(); test_state.chunks[3] = test_state.chunks[0].clone(); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 11a2dc6be83a..950bb3d6e6da 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -14,7 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The Network Bridge Subsystem - handles _incoming_ messages from the network, forwarded to the relevant subsystems. +//! The Network Bridge Subsystem - handles _incoming_ messages from the network, forwarded to the +//! relevant subsystems. 
use super::*; use always_assert::never; @@ -86,7 +87,8 @@ pub struct NetworkBridgeRx { } impl NetworkBridgeRx { - /// Create a new network bridge subsystem with underlying network service and authority discovery service. + /// Create a new network bridge subsystem with underlying network service and authority + /// discovery service. /// /// This assumes that the network service has had the notifications protocol for the network /// bridge already registered. See [`peers_sets_info`](peers_sets_info). diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index 078f6591ae2a..e18a7e541832 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -795,8 +795,9 @@ fn peer_messages_sent_via_overseer() { network_handle.disconnect_peer(peer.clone(), PeerSet::Validation).await; - // Approval distribution message comes first, and the message is only sent to that subsystem. - // then a disconnection event arises that is sent to all validation networking subsystems. + // Approval distribution message comes first, and the message is only sent to that + // subsystem. then a disconnection event arises that is sent to all validation networking + // subsystems. assert_matches!( virtual_overseer.recv().await, diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 2b54f6f0f06d..93916dd70fec 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -61,7 +61,8 @@ pub struct NetworkBridgeTx { } impl NetworkBridgeTx { - /// Create a new network bridge subsystem with underlying network service and authority discovery service. + /// Create a new network bridge subsystem with underlying network service and authority + /// discovery service. /// /// This assumes that the network service has had the notifications protocol for the network /// bridge already registered. See [`peers_sets_info`](peers_sets_info). diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 098416c5b88d..d4d1df3da467 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -106,9 +106,10 @@ impl Service { /// It will ask the network to connect to the validators and not disconnect /// from them at least until the next request is issued for the same peer set. /// - /// This method will also disconnect from previously connected validators not in the `validator_ids` set. - /// it takes `network_service` and `authority_discovery_service` by value - /// and returns them as a workaround for the Future: Send requirement imposed by async function implementation. + /// This method will also disconnect from previously connected validators not in the + /// `validator_ids` set. it takes `network_service` and `authority_discovery_service` by value + /// and returns them as a workaround for the Future: Send requirement imposed by async function + /// implementation. pub async fn on_request( &mut self, validator_ids: Vec, diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 39b23c152cbb..e4adfdc9d941 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -225,8 +225,8 @@ struct State { /// Our validator groups per active leaf. our_validators_groups: HashMap, - /// The mapping from [`PeerId`] to [`HashSet`]. 
This is filled over time as we learn the [`PeerId`]'s - /// by `PeerConnected` events. + /// The mapping from [`PeerId`] to [`HashSet`]. This is filled over time + /// as we learn the [`PeerId`]'s by `PeerConnected` events. peer_ids: HashMap>, /// Tracks which validators we want to stay connected to. @@ -241,8 +241,8 @@ struct State { /// All collation fetching requests that are still waiting to be answered. /// - /// They are stored per relay parent, when our view changes and the relay parent moves out, we will cancel the fetch - /// request. + /// They are stored per relay parent, when our view changes and the relay parent moves out, we + /// will cancel the fetch request. waiting_collation_fetches: HashMap, /// Active collation fetches. @@ -526,8 +526,8 @@ async fn connect_to_validators( /// Advertise collation to the given `peer`. /// -/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is -/// set as validator for our para at the given `relay_parent`. +/// This will only advertise a collation if there exists one for the given `relay_parent` and the +/// given `peer` is set as validator for our para at the given `relay_parent`. #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn advertise_collation( ctx: &mut Context, @@ -638,7 +638,8 @@ async fn process_msg( ); }, NetworkBridgeUpdate(event) => { - // We should count only this shoulder in the histogram, as other shoulders are just introducing noise + // We should count only this shoulder in the histogram, as other shoulders are just + // introducing noise let _ = state.metrics.time_process_msg(); if let Err(e) = handle_network_msg(ctx, runtime, state, event).await { diff --git a/node/network/collator-protocol/src/collator_side/tests.rs b/node/network/collator-protocol/src/collator_side/tests.rs index 757ef813a3d0..e406e5d869cc 100644 --- a/node/network/collator-protocol/src/collator_side/tests.rs +++ b/node/network/collator-protocol/src/collator_side/tests.rs @@ -160,8 +160,8 @@ impl TestState { /// Generate a new relay parent and inform the subsystem about the new view. /// - /// If `merge_views == true` it means the subsystem will be informed that we are working on the old `relay_parent` - /// and the new one. + /// If `merge_views == true` it means the subsystem will be informed that we are working on the + /// old `relay_parent` and the new one. async fn advance_to_new_round( &mut self, virtual_overseer: &mut VirtualOverseer, @@ -901,7 +901,8 @@ fn collate_on_two_different_relay_chain_blocks() { let old_relay_parent = test_state.relay_parent; - // Advance to a new round, while informing the subsystem that the old and the new relay parent are active. + // Advance to a new round, while informing the subsystem that the old and the new relay + // parent are active. test_state.advance_to_new_round(virtual_overseer, true).await; distribute_collation(virtual_overseer, &test_state, true).await; @@ -1085,7 +1086,8 @@ where .await .unwrap(); - // Keep the feedback channel alive because we need to use it to inform about the finished transfer. + // Keep the feedback channel alive because we need to use it to inform about the + // finished transfer. 
let feedback_tx = assert_matches!( rx.await, Ok(full_response) => { diff --git a/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/node/network/collator-protocol/src/collator_side/validators_buffer.rs index 851923a6d0d4..13ed3f66e0f1 100644 --- a/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -23,9 +23,9 @@ //! We keep a simple FIFO buffer of N validator groups and a bitvec for each advertisement, //! 1 indicating we want to be connected to i-th validator in a buffer, 0 otherwise. //! -//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a relay -//! parent, one can reset a bit back to 0 for particular **validator**. For example, if a collation -//! was fetched or some timeout has been hit. +//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a +//! relay parent, one can reset a bit back to 0 for particular **validator**. For example, if a +//! collation was fetched or some timeout has been hit. //! //! The bitwise OR over known advertisements gives us validators indices for connection request. diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs index a2e92e8c78d2..47409e8d10f3 100644 --- a/node/network/collator-protocol/src/validator_side/tests.rs +++ b/node/network/collator-protocol/src/validator_side/tests.rs @@ -730,7 +730,8 @@ fn reject_connection_to_next_group() { }) } -// Ensure that we fetch a second collation, after the first checked collation was found to be invalid. +// Ensure that we fetch a second collation, after the first checked collation was found to be +// invalid. #[test] fn fetch_next_collation_on_invalid_collation() { let test_state = TestState::default(); diff --git a/node/network/dispute-distribution/src/lib.rs b/node/network/dispute-distribution/src/lib.rs index a39f78358f44..ad99bc41fa64 100644 --- a/node/network/dispute-distribution/src/lib.rs +++ b/node/network/dispute-distribution/src/lib.rs @@ -60,8 +60,8 @@ use self::sender::{DisputeSender, DisputeSenderMessage}; /// ## The receiver [`DisputesReceiver`] /// -/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running task within -/// this subsystem ([`DisputesReceiver::run`]). +/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running +/// task within this subsystem ([`DisputesReceiver::run`]). /// /// Conceptually all the receiver has to do, is waiting for incoming requests which are passed in /// via a dedicated channel and forwarding them to the dispute coordinator via @@ -101,8 +101,8 @@ const LOG_TARGET: &'static str = "parachain::dispute-distribution"; /// Rate limit on the `receiver` side. /// -/// If messages from one peer come in at a higher rate than every `RECEIVE_RATE_LIMIT` on average, we -/// start dropping messages from that peer to enforce that limit. +/// If messages from one peer come in at a higher rate than every `RECEIVE_RATE_LIMIT` on average, +/// we start dropping messages from that peer to enforce that limit. pub const RECEIVE_RATE_LIMIT: Duration = Duration::from_millis(100); /// Rate limit on the `sender` side. 
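The receiver-side limit documented just above can be pictured with a small, self-contained sketch: one timestamp per peer, and any message arriving sooner than `RECEIVE_RATE_LIMIT` after that peer's previous one is dropped. This is only a toy model of the rule as stated in the comment; the `u64` peer identifier and the per-message (rather than on-average) check are simplifying assumptions, not the dispute-distribution subsystem's actual rate limiter.

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Mirrors the constant documented above: at most one message per peer every 100ms.
const RECEIVE_RATE_LIMIT: Duration = Duration::from_millis(100);

/// Hypothetical stand-in for the real `PeerId`.
type Peer = u64;

#[derive(Default)]
struct ReceiverRateLimit {
    last_accepted: HashMap<Peer, Instant>,
}

impl ReceiverRateLimit {
    /// Returns `true` if the message should be processed, `false` if it should be
    /// dropped because the peer exceeded the limit.
    fn allow(&mut self, peer: Peer, now: Instant) -> bool {
        match self.last_accepted.get(&peer) {
            Some(prev) if now.duration_since(*prev) < RECEIVE_RATE_LIMIT => false,
            _ => {
                self.last_accepted.insert(peer, now);
                true
            },
        }
    }
}

fn main() {
    let mut limiter = ReceiverRateLimit::default();
    let start = Instant::now();
    assert!(limiter.allow(1, start));
    // A second message from the same peer 10ms later is dropped.
    assert!(!limiter.allow(1, start + Duration::from_millis(10)));
    // A different peer is unaffected.
    assert!(limiter.allow(2, start + Duration::from_millis(10)));
}
```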
diff --git a/node/network/dispute-distribution/src/receiver/batches/batch.rs b/node/network/dispute-distribution/src/receiver/batches/batch.rs index 75f37107dff9..11380b7c072e 100644 --- a/node/network/dispute-distribution/src/receiver/batches/batch.rs +++ b/node/network/dispute-distribution/src/receiver/batches/batch.rs @@ -192,8 +192,8 @@ impl Batch { /// Calculate when the next tick should happen. /// - /// This will usually return `now + BATCH_COLLECTING_INTERVAL`, except if the lifetime of this batch - /// would exceed `MAX_BATCH_LIFETIME`. + /// This will usually return `now + BATCH_COLLECTING_INTERVAL`, except if the lifetime of this + /// batch would exceed `MAX_BATCH_LIFETIME`. /// /// # Arguments /// diff --git a/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs b/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs index 72f6e80a26a4..9a5e665a5756 100644 --- a/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs +++ b/node/network/dispute-distribution/src/receiver/batches/waiting_queue.rs @@ -50,8 +50,8 @@ impl WaitingQueue { /// Push a `PendingWake`. /// - /// The next call to `wait_ready` will make sure to wake soon enough to process that new event in a - /// timely manner. + /// The next call to `wait_ready` will make sure to wake soon enough to process that new event + /// in a timely manner. pub fn push(&mut self, wake: PendingWake) { self.pending_wakes.push(wake); // Reset timer as it is potentially obsolete now: diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs index ed108a67fac3..827a77281ccb 100644 --- a/node/network/dispute-distribution/src/receiver/mod.rs +++ b/node/network/dispute-distribution/src/receiver/mod.rs @@ -382,11 +382,11 @@ where if let Err(pending_response) = batch_result { // We don't expect honest peers to send redundant votes within a single batch, // as the timeout for retry is much higher. Still we don't want to punish the - // node as it might not be the node's fault. Some other (malicious) node could have been - // faster sending the same votes in order to harm the reputation of that honest - // node. Given that we already have a rate limit, if a validator chooses to - // waste available rate with redundant votes - so be it. The actual dispute - // resolution is unaffected. + // node as it might not be the node's fault. Some other (malicious) node could + // have been faster sending the same votes in order to harm the reputation of + // that honest node. Given that we already have a rate limit, if a validator + // chooses to waste available rate with redundant votes - so be it. The actual + // dispute resolution is unaffected. gum::debug!( target: LOG_TARGET, ?peer, diff --git a/node/network/dispute-distribution/src/sender/send_task.rs b/node/network/dispute-distribution/src/sender/send_task.rs index fcd670ff9ce9..18c66066d162 100644 --- a/node/network/dispute-distribution/src/sender/send_task.rs +++ b/node/network/dispute-distribution/src/sender/send_task.rs @@ -45,8 +45,8 @@ use crate::{ /// /// The unit of work for a `SendTask` is an authority/validator. pub struct SendTask { - /// The request we are supposed to get out to all `parachain` validators of the dispute's session - /// and to all current authorities. + /// The request we are supposed to get out to all `parachain` validators of the dispute's + /// session and to all current authorities. 
request: DisputeRequest, /// The set of authorities we need to send our messages to. This set will change at session @@ -185,7 +185,8 @@ impl SendTask { /// Handle a finished response waiting task. /// - /// Called by `DisputeSender` upon reception of the corresponding message from our spawned `wait_response_task`. + /// Called by `DisputeSender` upon reception of the corresponding message from our spawned + /// `wait_response_task`. pub fn on_finished_send(&mut self, authority: &AuthorityDiscoveryId, result: TaskResult) { match result { TaskResult::Failed(err) => { @@ -204,8 +205,8 @@ impl SendTask { TaskResult::Succeeded => { let status = match self.deliveries.get_mut(&authority) { None => { - // Can happen when a sending became irrelevant while the response was already - // queued. + // Can happen when a sending became irrelevant while the response was + // already queued. gum::debug!( target: LOG_TARGET, candidate = ?self.request.0.candidate_receipt.hash(), diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 62a071aa6f4c..3c178ad9dfa5 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -246,7 +246,8 @@ where { let mut connections = authorities_past_present_future(sender, leaf).await?; - // Remove all of our locally controlled validator indices so we don't connect to ourself. + // Remove all of our locally controlled validator indices so we don't connect to + // ourself. let connections = if remove_all_controlled(&self.keystore, &mut connections) != 0 { connections diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs index 1b356f67617b..99dd513c4d79 100644 --- a/node/network/protocol/src/grid_topology.rs +++ b/node/network/protocol/src/grid_topology.rs @@ -17,17 +17,20 @@ //! Grid topology support implementation //! The basic operation of the 2D grid topology is that: //! * A validator producing a message sends it to its row-neighbors and its column-neighbors -//! * A validator receiving a message originating from one of its row-neighbors sends it to its column-neighbors -//! * A validator receiving a message originating from one of its column-neighbors sends it to its row-neighbors +//! * A validator receiving a message originating from one of its row-neighbors sends it to its +//! column-neighbors +//! * A validator receiving a message originating from one of its column-neighbors sends it to its +//! row-neighbors //! -//! This grid approach defines 2 unique paths for every validator to reach every other validator in at most 2 hops. +//! This grid approach defines 2 unique paths for every validator to reach every other validator in +//! at most 2 hops. //! //! However, we also supplement this with some degree of random propagation: //! every validator, upon seeing a message for the first time, propagates it to 8 random peers. //! This inserts some redundancy in case the grid topology isn't working or is being attacked - //! an adversary doesn't know which peers a validator will send to. -//! This is combined with the property that the adversary doesn't know which validators will elect to check a block. -//! +//! This is combined with the property that the adversary doesn't know which validators will elect +//! to check a block. 
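The routing rules in this module-level comment can be checked with a short, self-contained sketch. It lays validators out row-major on a square `width x width` grid (a simplifying assumption for the illustration) and verifies the two-hop property: the originator sends to its row- and column-neighbours, row-recipients forward along their columns, column-recipients forward along their rows, and every other validator is reached within two hops. This is only a model of the rules described above, not the actual `GridNeighbors` implementation, and it leaves out the random fan-out to 8 peers.

```rust
/// Indices of everyone sharing `index`'s row on a row-major `width x width` grid.
fn row_neighbours(index: usize, width: usize) -> Vec<usize> {
    let row = index / width;
    (0..width).map(|c| row * width + c).filter(|&v| v != index).collect()
}

/// Indices of everyone sharing `index`'s column.
fn col_neighbours(index: usize, width: usize) -> Vec<usize> {
    let col = index % width;
    (0..width).map(|r| r * width + col).filter(|&v| v != index).collect()
}

fn main() {
    let width = 4; // 16 validators in a 4x4 grid
    let origin = 5;

    // Hop 1: the originator sends to its row- and column-neighbours.
    let hop1_row = row_neighbours(origin, width);
    let hop1_col = col_neighbours(origin, width);

    // Hop 2: a validator that received the message from a row-neighbour forwards
    // it to its column-neighbours, and vice versa.
    let mut reached: Vec<usize> = hop1_row.iter().chain(&hop1_col).copied().collect();
    for &v in &hop1_row {
        reached.extend(col_neighbours(v, width));
    }
    for &v in &hop1_col {
        reached.extend(row_neighbours(v, width));
    }

    reached.sort();
    reached.dedup();
    reached.retain(|&v| v != origin);

    // Every other validator is reached within at most two hops.
    assert_eq!(reached.len(), width * width - 1);
}
```

The two grid dimensions are also what give every pair of validators the two redundant paths mentioned above, which is why the random propagation is only a supplement.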
use crate::PeerId; use polkadot_primitives::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex}; @@ -188,7 +191,8 @@ impl GridNeighbors { (false, false) => RequiredRouting::None, (true, false) => RequiredRouting::GridY, // messages from X go to Y (false, true) => RequiredRouting::GridX, // messages from Y go to X - (true, true) => RequiredRouting::GridXY, // if the grid works as expected, this shouldn't happen. + (true, true) => RequiredRouting::GridXY, /* if the grid works as expected, this + * shouldn't happen. */ } } @@ -213,7 +217,8 @@ impl GridNeighbors { "Grid topology is unexpected, play it safe and send to X AND Y" ); RequiredRouting::GridXY - }, // if the grid works as expected, this shouldn't happen. + }, /* if the grid works as expected, this + * shouldn't happen. */ } } diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 948c422a82f8..2df926ac55d8 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -91,7 +91,8 @@ impl Into for ObservedRole { /// Specialized wrapper around [`View`]. /// -/// Besides the access to the view itself, it also gives access to the [`jaeger::Span`] per leave/head. +/// Besides the access to the view itself, it also gives access to the [`jaeger::Span`] per +/// leave/head. #[derive(Debug, Clone, Default)] pub struct OurView { view: View, @@ -131,7 +132,8 @@ impl std::ops::Deref for OurView { } } -/// Construct a new [`OurView`] with the given chain heads, finalized number 0 and disabled [`jaeger::Span`]'s. +/// Construct a new [`OurView`] with the given chain heads, finalized number 0 and disabled +/// [`jaeger::Span`]'s. /// /// NOTE: Use for tests only. /// diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index ce47ac30811a..b9fa80d5c4a2 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -98,7 +98,8 @@ impl PeerSet { max_notification_size, handshake: None, set_config: SetConfig { - // Non-authority nodes don't need to accept incoming connections on this peer set: + // Non-authority nodes don't need to accept incoming connections on this peer + // set: in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 }, out_peers: 0, reserved_nodes: Vec::new(), diff --git a/node/network/protocol/src/request_response/incoming/mod.rs b/node/network/protocol/src/request_response/incoming/mod.rs index e2b8ad526488..445544838672 100644 --- a/node/network/protocol/src/request_response/incoming/mod.rs +++ b/node/network/protocol/src/request_response/incoming/mod.rs @@ -78,8 +78,8 @@ where /// reputation changes in that case. /// /// Params: - /// - The raw request to decode - /// - Reputation changes to apply for the peer in case decoding fails. + /// - The raw request to decode + /// - Reputation changes to apply for the peer in case decoding fails. fn try_from_raw( raw: sc_network::config::IncomingRequest, reputation_changes: Vec, diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index d895a90079cc..912447c0c626 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -110,9 +110,9 @@ pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3; /// Response size limit for responses of POV like data. /// /// This is larger than `MAX_POV_SIZE` to account for protocol overhead and for additional data in -/// `CollationFetchingV1` or `AvailableDataFetchingV1` for example. 
We try to err on larger limits here -/// as a too large limit only allows an attacker to waste our bandwidth some more, a too low limit -/// might have more severe effects. +/// `CollationFetchingV1` or `AvailableDataFetchingV1` for example. We try to err on larger limits +/// here as a too large limit only allows an attacker to waste our bandwidth some more, a too low +/// limit might have more severe effects. const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// Maximum response sizes for `StatementFetchingV1`. diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 160132011589..4cdf0d8af467 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -185,8 +185,8 @@ struct VcPerPeerTracker { } impl VcPerPeerTracker { - /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - /// based on a message that we have sent it from our local pool. + /// Note that the remote should now be aware that a validator has seconded a given candidate (by + /// hash) based on a message that we have sent it from our local pool. fn note_local(&mut self, h: CandidateHash) { if !note_hash(&mut self.local_observed, h) { gum::warn!( @@ -198,8 +198,8 @@ impl VcPerPeerTracker { } } - /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - /// based on a message that it has sent us. + /// Note that the remote should now be aware that a validator has seconded a given candidate (by + /// hash) based on a message that it has sent us. /// /// Returns `true` if the peer was allowed to send us such a message, `false` otherwise. fn note_remote(&mut self, h: CandidateHash) -> bool { @@ -226,8 +226,8 @@ fn note_hash( /// knowledge that a peer has about goings-on in a relay parent. #[derive(Default)] struct PeerRelayParentKnowledge { - /// candidates that the peer is aware of because we sent statements to it. This indicates that we can - /// send other statements pertaining to that candidate. + /// candidates that the peer is aware of because we sent statements to it. This indicates that + /// we can send other statements pertaining to that candidate. sent_candidates: HashSet, /// candidates that peer is aware of, because we received statements from it. received_candidates: HashSet, @@ -321,13 +321,13 @@ impl PeerRelayParentKnowledge { } } - /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on - /// a message we are receiving from the peer. + /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based + /// on a message we are receiving from the peer. /// /// Provide the maximum message count that we can receive per candidate. In practice we should - /// not receive more statements for any one candidate than there are members in the group assigned - /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. + /// not receive more statements for any one candidate than there are members in the group + /// assigned to that para, but this maximum needs to be lenient to account for equivocations + /// that may be cross-group. As such, a maximum of 2 * `n_validators` is recommended. /// /// This returns an error if the peer should not have sent us this message according to protocol /// rules for flood protection. 
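A minimal sketch of the per-candidate flood-protection ceiling described in these comments, under simplifying assumptions: a plain byte array stands in for the real candidate hash type, and a bare counter per candidate replaces the fingerprint bookkeeping that `PeerRelayParentKnowledge` actually performs. Once more than `2 * n_validators` statements have been received for a candidate, further ones are rejected and the peer can be reported.

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for the real `CandidateHash`.
type CandidateHash = [u8; 32];

/// Tracks, per candidate, how many statements one peer has sent us.
struct FloodGuard {
    received_per_candidate: HashMap<CandidateHash, usize>,
    /// The lenient ceiling recommended above: `2 * n_validators`.
    max_per_candidate: usize,
}

impl FloodGuard {
    fn new(n_validators: usize) -> Self {
        Self { received_per_candidate: HashMap::new(), max_per_candidate: 2 * n_validators }
    }

    /// Record one more statement for `candidate`; `Err(())` means the peer broke
    /// the flood-protection rule and should be reported.
    fn note_received(&mut self, candidate: CandidateHash) -> Result<(), ()> {
        let count = self.received_per_candidate.entry(candidate).or_insert(0);
        if *count >= self.max_per_candidate {
            return Err(());
        }
        *count += 1;
        Ok(())
    }
}

fn main() {
    // A group of 3 validators allows at most 6 statements per candidate.
    let mut guard = FloodGuard::new(3);
    let candidate = [0u8; 32];
    for _ in 0..6 {
        assert!(guard.note_received(candidate).is_ok());
    }
    assert!(guard.note_received(candidate).is_err());
}
```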
@@ -490,13 +490,13 @@ impl PeerData { self.view_knowledge.get(relay_parent).map_or(false, |k| k.can_send(fingerprint)) } - /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on - /// a message we are receiving from the peer. + /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based + /// on a message we are receiving from the peer. /// /// Provide the maximum message count that we can receive per candidate. In practice we should - /// not receive more statements for any one candidate than there are members in the group assigned - /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. + /// not receive more statements for any one candidate than there are members in the group + /// assigned to that para, but this maximum needs to be lenient to account for equivocations + /// that may be cross-group. As such, a maximum of 2 * `n_validators` is recommended. /// /// This returns an error if the peer should not have sent us this message according to protocol /// rules for flood protection. @@ -600,8 +600,8 @@ enum NotedStatement<'a> { /// Large statement fetching status. enum LargeStatementStatus { - /// We are currently fetching the statement data from a remote peer. We keep a list of other nodes - /// claiming to have that data and will fallback on them. + /// We are currently fetching the statement data from a remote peer. We keep a list of other + /// nodes claiming to have that data and will fallback on them. Fetching(FetchingInfo), /// Statement data is fetched or we got it locally via `StatementDistributionMessage::Share`. FetchedOrShared(CommittedCandidateReceipt), @@ -712,8 +712,8 @@ impl ActiveHeadData { /// to have been checked, including that the validator index is not out-of-bounds and /// the signature is valid. /// - /// Any other statements or those that reference a candidate we are not aware of cannot be accepted - /// and will return `NotedStatement::NotUseful`. + /// Any other statements or those that reference a candidate we are not aware of cannot be + /// accepted and will return `NotedStatement::NotUseful`. fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -1272,9 +1272,9 @@ async fn retrieve_statement_from_message<'a, Context>( } }, protocol_v1::StatementDistributionMessage::Statement(_, s) => { - // No fetch in progress, safe to return any statement immediately (we don't bother - // about normal network jitter which might cause `Valid` statements to arrive early - // for now.). + // No fetch in progress, safe to return any statement immediately (we don't + // bother about normal network jitter which might cause `Valid` statements to + // arrive early for now.). 
return Some(s) }, } @@ -1470,7 +1470,8 @@ async fn handle_incoming_message<'a, Context>( ); match rep { - // This happens when a Valid statement has been received but there is no corresponding Seconded + // This happens when a Valid statement has been received but there is no corresponding + // Seconded COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE => { metrics.on_unexpected_statement_valid(); // Report peer merely if this is not a duplicate out-of-view statement that diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index 3f3e6e589616..62167f77a1e0 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -824,8 +824,8 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { }) .await; - // receive a seconded statement from peer A. it should be propagated onwards to peer B and to - // candidate backing. + // receive a seconded statement from peer A. it should be propagated onwards to peer B and + // to candidate backing. let statement = { let signing_context = SigningContext { parent_hash: hash_a, session_index }; @@ -2536,8 +2536,8 @@ fn handle_multiple_seconded_statements() { }) .await; - // receive a seconded statement from peer A. it should be propagated onwards to peer B and to - // candidate backing. + // receive a seconded statement from peer A. it should be propagated onwards to peer B and + // to candidate backing. let statement = { let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index }; diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index a2d553779fdc..ebf33d5247b1 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -211,10 +211,10 @@ impl Handle { /// Wait for a block with the given hash to be in the active-leaves set. /// - /// The response channel responds if the hash was activated and is closed if the hash was deactivated. - /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, - /// the response channel may never return if the hash was deactivated before this call. - /// In this case, it's the caller's responsibility to ensure a timeout is set. + /// The response channel responds if the hash was activated and is closed if the hash was + /// deactivated. Note that due the fact the overseer doesn't store the whole active-leaves set, + /// only deltas, the response channel may never return if the hash was deactivated before this + /// call. In this case, it's the caller's responsibility to ensure a timeout is set. pub async fn wait_for_activation( &mut self, hash: Hash, @@ -355,7 +355,6 @@ pub async fn forward_events>(client: Arc
, mut hand /// +-----------+ /// | | /// +-----------+ -/// /// ``` /// /// [`Subsystem`]: trait.Subsystem.html @@ -363,8 +362,8 @@ pub async fn forward_events>(client: Arc
, mut hand /// # Example /// /// The [`Subsystems`] may be any type as long as they implement an expected interface. -/// Here, we create a mock validation subsystem and a few dummy ones and start the `Overseer` with them. -/// For the sake of simplicity the termination of the example is done with a timeout. +/// Here, we create a mock validation subsystem and a few dummy ones and start the `Overseer` with +/// them. For the sake of simplicity the termination of the example is done with a timeout. /// ``` /// # use std::time::Duration; /// # use futures::{executor, pin_mut, select, FutureExt}; @@ -394,11 +393,11 @@ pub async fn forward_events>(client: Arc
, mut hand /// impl overseer::Subsystem for ValidationSubsystem /// where /// Ctx: overseer::SubsystemContext< -/// Message=CandidateValidationMessage, -/// AllMessages=AllMessages, -/// Signal=OverseerSignal, -/// Error=SubsystemError, -/// >, +/// Message=CandidateValidationMessage, +/// AllMessages=AllMessages, +/// Signal=OverseerSignal, +/// Error=SubsystemError, +/// >, /// { /// fn start( /// self, @@ -426,10 +425,10 @@ pub async fn forward_events>(client: Arc
, mut hand /// /// let spawner = sp_core::testing::TaskExecutor::new(); /// let (overseer, _handle) = dummy_overseer_builder(spawner, AlwaysSupportsParachains, None) -/// .unwrap() -/// .replace_candidate_validation(|_| ValidationSubsystem) -/// .build() -/// .unwrap(); +/// .unwrap() +/// .replace_candidate_validation(|_| ValidationSubsystem) +/// .build() +/// .unwrap(); /// /// let timer = Delay::new(Duration::from_millis(50)).fuse(); /// @@ -825,7 +824,8 @@ where // If there are no leaves being deactivated, we don't need to send an update. // - // Our peers will be informed about our finalized block the next time we activating/deactivating some leaf. + // Our peers will be informed about our finalized block the next time we + // activating/deactivating some leaf. if !update.is_empty() { self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; } diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs index 992d70ba1324..89d3ea6c0af9 100644 --- a/node/primitives/src/disputes/message.rs +++ b/node/primitives/src/disputes/message.rs @@ -105,8 +105,8 @@ impl DisputeMessage { /// - the invalid statement is indeed an invalid one /// - the valid statement is indeed a valid one /// - The passed `CandidateReceipt` has the correct hash (as signed in the statements). - /// - the given validator indices match with the given `ValidatorId`s in the statements, - /// given a `SessionInfo`. + /// - the given validator indices match with the given `ValidatorId`s in the statements, given a + /// `SessionInfo`. /// /// We don't check whether the given `SessionInfo` matches the `SessionIndex` in the /// statements, because we can't without doing a runtime query. Nevertheless this smart diff --git a/node/primitives/src/disputes/status.rs b/node/primitives/src/disputes/status.rs index 309225edc94b..d93c3ec846ce 100644 --- a/node/primitives/src/disputes/status.rs +++ b/node/primitives/src/disputes/status.rs @@ -16,7 +16,8 @@ use parity_scale_codec::{Decode, Encode}; -/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. +/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS +/// reboots. pub type Timestamp = u64; /// The status of dispute. @@ -88,8 +89,8 @@ impl DisputeStatus { } } - /// Transition the status to a new status after observing the dispute has concluded for the candidate. - /// This may be a no-op if the status was already concluded. + /// Transition the status to a new status after observing the dispute has concluded for the + /// candidate. This may be a no-op if the status was already concluded. pub fn conclude_for(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedFor(now), @@ -98,8 +99,8 @@ impl DisputeStatus { } } - /// Transition the status to a new status after observing the dispute has concluded against the candidate. - /// This may be a no-op if the status was already concluded. + /// Transition the status to a new status after observing the dispute has concluded against the + /// candidate. This may be a no-op if the status was already concluded. 
pub fn conclude_against(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 1177dbc17caa..d49cd806d54e 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -180,8 +180,8 @@ impl std::fmt::Debug for Statement { impl Statement { /// Get the candidate hash referenced by this statement. /// - /// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be expensive - /// for large candidates. + /// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be + /// expensive for large candidates. pub fn candidate_hash(&self) -> CandidateHash { match *self { Statement::Valid(ref h) => *h, @@ -215,8 +215,8 @@ impl EncodeAs for Statement { /// /// Signing context and validator set should be apparent from context. /// -/// This statement is "full" in the sense that the `Seconded` variant includes the candidate receipt. -/// Only the compact `SignedStatement` is suitable for submission to the chain. +/// This statement is "full" in the sense that the `Seconded` variant includes the candidate +/// receipt. Only the compact `SignedStatement` is suitable for submission to the chain. pub type SignedFullStatement = Signed; /// Variant of `SignedFullStatement` where the signature has not yet been verified. @@ -256,8 +256,8 @@ pub enum InvalidCandidate { /// Result of the validation of the candidate. #[derive(Debug)] pub enum ValidationResult { - /// Candidate is valid. The validation process yields these outputs and the persisted validation - /// data used to form inputs. + /// Candidate is valid. The validation process yields these outputs and the persisted + /// validation data used to form inputs. Valid(CandidateCommitments, PersistedValidationData), /// Candidate is invalid. Invalid(InvalidCandidate), @@ -321,7 +321,8 @@ pub struct Collation { pub proof_of_validity: MaybeCompressedPoV, /// The number of messages processed from the DMQ. pub processed_downward_messages: u32, - /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + /// The mark which specifies the block number up to which all inbound HRMP messages are + /// processed. pub hrmp_watermark: BlockNumber, } @@ -344,9 +345,9 @@ pub struct CollationResult { pub collation: Collation, /// An optional result sender that should be informed about a successfully seconded collation. /// - /// There is no guarantee that this sender is informed ever about any result, it is completely okay to just drop it. - /// However, if it is called, it should be called with the signed statement of a parachain validator seconding the - /// collation. + /// There is no guarantee that this sender is informed ever about any result, it is completely + /// okay to just drop it. However, if it is called, it should be called with the signed + /// statement of a parachain validator seconding the collation. pub result_sender: Option>, } @@ -362,8 +363,9 @@ impl CollationResult { /// Collation function. /// -/// Will be called with the hash of the relay chain block the parachain block should be build on and the -/// [`ValidationData`] that provides information about the state of the parachain on the relay chain. +/// Will be called with the hash of the relay chain block the parachain block should be build on and +/// the [`ValidationData`] that provides information about the state of the parachain on the relay +/// chain. 
/// /// Returns an optional [`CollationResult`]. #[cfg(not(target_os = "unknown"))] diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index a9e6b45f3b2d..7aabfa6e9185 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -529,11 +529,12 @@ fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::RuntimeG hex!["12b782529c22032ed4694e0f6e7d486be7daa6d12088f6bc74d593b3900b8438"].into(), ]; - // for i in 1 2 3 4; do for j in stash controller; do subkey inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in babe; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in grandpa; do subkey --ed25519 inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in im_online; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done - // for i in 1 2 3 4; do for j in para_validator para_assignment; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done + // for i in 1 2 3 4; do for j in stash controller; do subkey inspect "$SECRET//$i//$j"; done; + // done for i in 1 2 3 4; do for j in babe; do subkey --sr25519 inspect "$SECRET//$i//$j"; done; + // done for i in 1 2 3 4; do for j in grandpa; do subkey --ed25519 inspect "$SECRET//$i//$j"; + // done; done for i in 1 2 3 4; do for j in im_online; do subkey --sr25519 inspect + // "$SECRET//$i//$j"; done; done for i in 1 2 3 4; do for j in para_validator para_assignment; + // do subkey --sr25519 inspect "$SECRET//$i//$j"; done; done let initial_authorities: Vec<( AccountId, AccountId, diff --git a/node/service/src/fake_runtime_api.rs b/node/service/src/fake_runtime_api.rs index b322114cbb75..d9553afa024b 100644 --- a/node/service/src/fake_runtime_api.rs +++ b/node/service/src/fake_runtime_api.rs @@ -16,7 +16,8 @@ //! Provides "fake" runtime API implementations //! -//! These are used to provide a type that implements these runtime APIs without requiring to import the native runtimes. +//! These are used to provide a type that implements these runtime APIs without requiring to import +//! the native runtimes. use beefy_primitives::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use grandpa_primitives::AuthorityId as GrandpaId; diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index fa8cb8ec77f7..4dda57110825 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -696,9 +696,10 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { /// This is an advanced feature and not recommended for general use. Generally, `build_full` is /// a better choice. /// -/// `overseer_enable_anyways` always enables the overseer, based on the provided `OverseerGenerator`, -/// regardless of the role the node has. The relay chain selection (longest or disputes-aware) is -/// still determined based on the role of the node. Likewise for authority discovery. +/// `overseer_enable_anyways` always enables the overseer, based on the provided +/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or +/// disputes-aware) is still determined based on the role of the node. Likewise for authority +/// discovery. /// /// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside. 
/// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is @@ -1331,9 +1332,10 @@ pub fn new_chain_ops( /// The actual "flavor", aka if it will use `Polkadot`, `Rococo` or `Kusama` is determined based on /// [`IdentifyVariant`] using the chain spec. /// -/// `overseer_enable_anyways` always enables the overseer, based on the provided `OverseerGenerator`, -/// regardless of the role the node has. The relay chain selection (longest or disputes-aware) is -/// still determined based on the role of the node. Likewise for authority discovery. +/// `overseer_enable_anyways` always enables the overseer, based on the provided +/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or +/// disputes-aware) is still determined based on the role of the node. Likewise for authority +/// discovery. #[cfg(feature = "full-node")] pub fn build_full( config: Configuration, diff --git a/node/service/src/relay_chain_selection.rs b/node/service/src/relay_chain_selection.rs index afc0ce320610..189073783f0d 100644 --- a/node/service/src/relay_chain_selection.rs +++ b/node/service/src/relay_chain_selection.rs @@ -472,8 +472,8 @@ where let lag = initial_leaf_number.saturating_sub(subchain_number); self.metrics.note_approval_checking_finality_lag(lag); - // Messages sent to `approval-distrbution` are known to have high `ToF`, we need to spawn a task for sending - // the message to not block here and delay finality. + // Messages sent to `approval-distrbution` are known to have high `ToF`, we need to spawn a + // task for sending the message to not block here and delay finality. if let Some(spawn_handle) = &self.spawn_handle { let mut overseer_handle = self.overseer.clone(); let lag_update_task = async move { @@ -537,9 +537,10 @@ where error = ?e, "Call to `DetermineUndisputedChain` failed", ); - // We need to return a sane finality target. But, we are unable to ensure we are not - // finalizing something that is being disputed or has been concluded as invalid. We will be - // conservative here and not vote for finality above the ancestor passed in. + // We need to return a sane finality target. But, we are unable to ensure we + // are not finalizing something that is being disputed or has been concluded + // as invalid. We will be conservative here and not vote for finality above + // the ancestor passed in. 
return Ok(target_hash) }, }; diff --git a/node/service/src/tests.rs b/node/service/src/tests.rs index 424af4d22a26..95d5765bad45 100644 --- a/node/service/src/tests.rs +++ b/node/service/src/tests.rs @@ -498,8 +498,8 @@ struct CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3 --- 0xA4(!avail) --- 0xA5(!avail) -/// \ -/// `- 0xB2 +/// \ +/// `- 0xB2 /// ``` fn chain_undisputed() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -529,8 +529,8 @@ fn chain_undisputed() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed) --- 0xA4(!avail) --- 0xA5(!avail) -/// \ -/// `- 0xB2 +/// \ +/// `- 0xB2 /// ``` fn chain_0() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -560,8 +560,8 @@ fn chain_0() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2(disputed) --- 0xA3 -/// \ -/// `- 0xB2 --- 0xB3(!available) +/// \ +/// `- 0xB2 --- 0xB3(!available) /// ``` fn chain_1() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -588,8 +588,8 @@ fn chain_1() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2(disputed) --- 0xA3 -/// \ -/// `- 0xB2 --- 0xB3 +/// \ +/// `- 0xB2 --- 0xB3 /// ``` fn chain_2() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -616,8 +616,8 @@ fn chain_2() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed) -/// \ -/// `- 0xB2 --- 0xB3 +/// \ +/// `- 0xB2 --- 0xB3 /// ``` fn chain_3() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; @@ -644,10 +644,10 @@ fn chain_3() -> CaseVars { /// ```raw /// genesis -- 0xA1 --- 0xA2 --- 0xA3(disputed) -/// \ -/// `- 0xB2 --- 0xB3 +/// \ +/// `- 0xB2 --- 0xB3 /// -/// ? --- NEX(does_not_exist) +/// ? --- NEX(does_not_exist) /// ``` fn chain_4() -> CaseVars { let head: Hash = ChainBuilder::GENESIS_HASH; diff --git a/node/subsystem-test-helpers/src/lib.rs b/node/subsystem-test-helpers/src/lib.rs index 4170f22c5b86..fb908278aa7d 100644 --- a/node/subsystem-test-helpers/src/lib.rs +++ b/node/subsystem-test-helpers/src/lib.rs @@ -310,7 +310,8 @@ pub fn make_buffered_subsystem_context( /// Test a subsystem, mocking the overseer /// -/// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective of a subsystem. +/// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective +/// of a subsystem. /// /// Times out in 5 seconds. pub fn subsystem_test_harness( diff --git a/node/subsystem-types/src/lib.rs b/node/subsystem-types/src/lib.rs index 88c7165bcd80..f438a09592c1 100644 --- a/node/subsystem-types/src/lib.rs +++ b/node/subsystem-types/src/lib.rs @@ -82,8 +82,8 @@ pub struct ActivatedLeaf { pub status: LeafStatus, /// An associated [`jaeger::Span`]. /// - /// NOTE: Each span should only be kept active as long as the leaf is considered active and should be dropped - /// when the leaf is deactivated. + /// NOTE: Each span should only be kept active as long as the leaf is considered active and + /// should be dropped when the leaf is deactivated. pub span: Arc, } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 8f2e3375b6f1..d5dcea7a2565 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -16,8 +16,8 @@ //! Message types for the overseer and subsystems. //! -//! These messages are intended to define the protocol by which different subsystems communicate with each -//! other and signals that they receive from an overseer to coordinate their work. +//! 
These messages are intended to define the protocol by which different subsystems communicate +//! with each other and signals that they receive from an overseer to coordinate their work. //! This is intended for use with the `polkadot-overseer` crate. //! //! Subsystems' APIs are defined separately from their implementation, leading to easier mocking. @@ -62,12 +62,13 @@ pub enum CandidateBackingMessage { /// Requests a set of backable candidates that could be backed in a child of the given /// relay-parent, referenced by its hash. GetBackedCandidates(Hash, Vec, oneshot::Sender>), - /// Note that the Candidate Backing subsystem should second the given candidate in the context of the - /// given relay-parent (ref. by hash). This candidate must be validated. + /// Note that the Candidate Backing subsystem should second the given candidate in the context + /// of the given relay-parent (ref. by hash). This candidate must be validated. Second(Hash, CandidateReceipt, PoV), - /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated - /// to a broader check by the Disputes Subsystem, though that escalation is deferred until the approval voting - /// stage to guarantee availability. Agreements are simply tallied until a quorum is reached. + /// Note a validator's statement about a particular candidate. Disagreements about validity + /// must be escalated to a broader check by the Disputes Subsystem, though that escalation is + /// deferred until the approval voting stage to guarantee availability. Agreements are simply + /// tallied until a quorum is reached. Statement(Hash, SignedFullStatement), } @@ -143,8 +144,8 @@ pub enum CandidateValidationMessage { /// Try to compile the given validation code and send back /// the outcome. /// - /// The validation code is specified by the hash and will be queried from the runtime API at the - /// given relay-parent. + /// The validation code is specified by the hash and will be queried from the runtime API at + /// the given relay-parent. PreCheck( // Relay-parent Hash, @@ -157,16 +158,16 @@ pub enum CandidateValidationMessage { #[derive(Debug, derive_more::From)] pub enum CollatorProtocolMessage { /// Signal to the collator protocol that it should connect to validators with the expectation - /// of collating on the given para. This is only expected to be called once, early on, if at all, - /// and only by the Collation Generation subsystem. As such, it will overwrite the value of - /// the previous signal. + /// of collating on the given para. This is only expected to be called once, early on, if at + /// all, and only by the Collation Generation subsystem. As such, it will overwrite the value + /// of the previous signal. /// /// This should be sent before any `DistributeCollation` message. CollateOn(ParaId), /// Provide a collation to distribute to validators with an optional result sender. /// - /// The result sender should be informed when at least one parachain validator seconded the collation. It is also - /// completely okay to just drop the sender. + /// The result sender should be informed when at least one parachain validator seconded the + /// collation. It is also completely okay to just drop the sender. DistributeCollation(CandidateReceipt, PoV, Option>), /// Report a collator as having provided an invalid collation. This should lead to disconnect /// and blacklist of the collator. @@ -174,7 +175,8 @@ pub enum CollatorProtocolMessage { /// Get a network bridge update. 
#[from] NetworkBridgeUpdate(NetworkBridgeEvent), - /// We recommended a particular candidate to be seconded, but it was invalid; penalize the collator. + /// We recommended a particular candidate to be seconded, but it was invalid; penalize the + /// collator. /// /// The hash is the relay parent. Invalid(Hash, CandidateReceipt), @@ -198,14 +200,15 @@ impl Default for CollatorProtocolMessage { pub enum DisputeCoordinatorMessage { /// Import statements by validators about a candidate. /// - /// The subsystem will silently discard ancient statements or sets of only dispute-specific statements for - /// candidates that are previously unknown to the subsystem. The former is simply because ancient - /// data is not relevant and the latter is as a DoS prevention mechanism. Both backing and approval - /// statements already undergo anti-DoS procedures in their respective subsystems, but statements - /// cast specifically for disputes are not necessarily relevant to any candidate the system is - /// already aware of and thus present a DoS vector. Our expectation is that nodes will notify each - /// other of disputes over the network by providing (at least) 2 conflicting statements, of which one is either - /// a backing or validation statement. + /// The subsystem will silently discard ancient statements or sets of only dispute-specific + /// statements for candidates that are previously unknown to the subsystem. The former is + /// simply because ancient data is not relevant and the latter is as a DoS prevention + /// mechanism. Both backing and approval statements already undergo anti-DoS procedures in + /// their respective subsystems, but statements cast specifically for disputes are not + /// necessarily relevant to any candidate the system is already aware of and thus present a DoS + /// vector. Our expectation is that nodes will notify each other of disputes over the network + /// by providing (at least) 2 conflicting statements, of which one is either a backing or + /// validation statement. /// /// This does not do any checking of the message signature. ImportStatements { @@ -222,16 +225,16 @@ pub enum DisputeCoordinatorMessage { /// /// This is: /// - we discarded the votes because - /// - they were ancient or otherwise invalid (result: `InvalidImport`) - /// - or we were not able to recover availability for an unknown candidate (result: + /// - they were ancient or otherwise invalid (result: `InvalidImport`) + /// - or we were not able to recover availability for an unknown candidate (result: /// `InvalidImport`) - /// - or were known already (in that case the result will still be `ValidImport`) + /// - or were known already (in that case the result will still be `ValidImport`) /// - or we recorded them because (`ValidImport`) - /// - we cast our own vote already on that dispute - /// - or we have approval votes on that candidate - /// - or other explicit votes on that candidate already recorded - /// - or recovered availability for the candidate - /// - or the imported statements are backing/approval votes, which are always accepted. + /// - we cast our own vote already on that dispute + /// - or we have approval votes on that candidate + /// - or other explicit votes on that candidate already recorded + /// - or recovered availability for the candidate + /// - or the imported statements are backing/approval votes, which are always accepted. pending_confirmation: Option>, }, /// Fetch a list of all recent disputes the coordinator is aware of. 
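The `pending_confirmation` field above follows an optional-acknowledgement pattern: the caller may attach a oneshot sender, and the coordinator answers on it once the votes have been recorded or discarded. The sketch below models just that flow with `futures::channel::oneshot`; the `ImportOutcome` enum, the `ancient` flag and `handle_import` are stand-ins invented for the illustration, not the dispute coordinator's real types or logic.

```rust
use futures::channel::oneshot;

/// Simplified stand-in for the real `ImportStatementsResult`.
#[derive(Debug, PartialEq)]
enum ImportOutcome {
    Invalid,
    Valid,
}

/// Pretend import: ancient statements are discarded, everything else is recorded.
/// If a confirmation channel was attached, report the outcome on it.
fn handle_import(ancient: bool, pending_confirmation: Option<oneshot::Sender<ImportOutcome>>) {
    let outcome = if ancient { ImportOutcome::Invalid } else { ImportOutcome::Valid };
    if let Some(tx) = pending_confirmation {
        // The caller may have dropped the receiver; confirmation is best-effort.
        let _ = tx.send(outcome);
    }
}

fn main() {
    // Import with a confirmation channel and wait for the result.
    let (tx, rx) = oneshot::channel();
    handle_import(false, Some(tx));
    assert_eq!(futures::executor::block_on(rx).unwrap(), ImportOutcome::Valid);

    // Fire-and-forget import without asking for a confirmation.
    handle_import(true, None);
}
```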
@@ -246,15 +249,17 @@ pub enum DisputeCoordinatorMessage { Vec<(SessionIndex, CandidateHash)>, oneshot::Sender>, ), - /// Sign and issue local dispute votes. A value of `true` indicates validity, and `false` invalidity. + /// Sign and issue local dispute votes. A value of `true` indicates validity, and `false` + /// invalidity. IssueLocalStatement(SessionIndex, CandidateHash, CandidateReceipt, bool), /// Determine the highest undisputed block within the given chain, based on where candidates /// were included. If even the base block should not be finalized due to a dispute, /// then `None` should be returned on the channel. /// - /// The block descriptions begin counting upwards from the block after the given `base_number`. The `base_number` - /// is typically the number of the last finalized block but may be slightly higher. This block - /// is inevitably going to be finalized so it is not accounted for by this function. + /// The block descriptions begin counting upwards from the block after the given `base_number`. + /// The `base_number` is typically the number of the last finalized block but may be slightly + /// higher. This block is inevitably going to be finalized so it is not accounted for by this + /// function. DetermineUndisputedChain { /// The lowest possible block to vote on. base: (BlockNumber, Hash), @@ -369,8 +374,8 @@ pub enum NetworkBridgeTxMessage { /// authority discovery has failed to resolve. failed: oneshot::Sender, }, - /// Alternative to `ConnectToValidators` in case you already know the `Multiaddrs` you want to be - /// connected to. + /// Alternative to `ConnectToValidators` in case you already know the `Multiaddrs` you want to + /// be connected to. ConnectToResolvedValidators { /// Each entry corresponds to the addresses of an already resolved validator. validator_addrs: Vec>, @@ -576,8 +581,8 @@ pub enum RuntimeApiRequest { OccupiedCoreAssumption, RuntimeApiSender>, ), - /// Get the persisted validation data for a particular para along with the current validation code - /// hash, matching the data hash against an expected one. + /// Get the persisted validation data for a particular para along with the current validation + /// code hash, matching the data hash against an expected one. AssumedValidationData( ParaId, Hash, @@ -595,10 +600,11 @@ pub enum RuntimeApiRequest { /// will inform on how the validation data should be computed if the para currently /// occupies a core. ValidationCode(ParaId, OccupiedCoreAssumption, RuntimeApiSender>), - /// Get validation code by its hash, either past, current or future code can be returned, as long as state is still - /// available. + /// Get validation code by its hash, either past, current or future code can be returned, as + /// long as state is still available. ValidationCodeByHash(ValidationCodeHash, RuntimeApiSender>), - /// Get a the candidate pending availability for a particular parachain by parachain / core index + /// Get a the candidate pending availability for a particular parachain by parachain / core + /// index CandidatePendingAvailability(ParaId, RuntimeApiSender>), /// Get all events concerning candidates (backing, inclusion, time-out) in the parent of /// the block in whose state this request is executed. @@ -623,8 +629,9 @@ pub enum RuntimeApiRequest { SubmitPvfCheckStatement(PvfCheckStatement, ValidatorSignature, RuntimeApiSender<()>), /// Returns code hashes of PVFs that require pre-checking by validators in the active set. 
PvfsRequirePrecheck(RuntimeApiSender>), - /// Get the validation code used by the specified para, taking the given `OccupiedCoreAssumption`, which - /// will inform on how the validation data should be computed if the para currently occupies a core. + /// Get the validation code used by the specified para, taking the given + /// `OccupiedCoreAssumption`, which will inform on how the validation data should be computed + /// if the para currently occupies a core. ValidationCodeHash( ParaId, OccupiedCoreAssumption, @@ -686,13 +693,15 @@ pub enum StatementDistributionMessage { NetworkBridgeUpdate(NetworkBridgeEvent), } -/// This data becomes intrinsics or extrinsics which should be included in a future relay chain block. +/// This data becomes intrinsics or extrinsics which should be included in a future relay chain +/// block. // It needs to be cloneable because multiple potential block authors can request copies. #[derive(Debug, Clone)] pub enum ProvisionableData { /// This bitfield indicates the availability of various candidate blocks. Bitfield(Hash, SignedAvailabilityBitfield), - /// The Candidate Backing subsystem believes that this candidate is valid, pending availability. + /// The Candidate Backing subsystem believes that this candidate is valid, pending + /// availability. BackedCandidate(CandidateReceipt), /// Misbehavior reports are self-contained proofs of validator misbehavior. MisbehaviorReport(Hash, ValidatorIndex, Misbehavior), @@ -716,11 +725,11 @@ pub struct ProvisionerInherentData { /// In all cases, the Hash is that of the relay parent. #[derive(Debug)] pub enum ProvisionerMessage { - /// This message allows external subsystems to request the set of bitfields and backed candidates - /// associated with a particular potential block hash. + /// This message allows external subsystems to request the set of bitfields and backed + /// candidates associated with a particular potential block hash. /// - /// This is expected to be used by a proposer, to inject that information into the `InherentData` - /// where it can be assembled into the `ParaInherent`. + /// This is expected to be used by a proposer, to inject that information into the + /// `InherentData` where it can be assembled into the `ParaInherent`. RequestInherentData(Hash, oneshot::Sender), /// This data should become part of a relay chain block ProvisionableData(Hash, ProvisionableData), diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs index 196b928ad62b..4d8eddde73e9 100644 --- a/node/subsystem-types/src/runtime_client.rs +++ b/node/subsystem-types/src/runtime_client.rs @@ -138,7 +138,7 @@ pub trait RuntimeApiSubsystemClient { async fn on_chain_votes(&self, at: Hash) -> Result>, ApiError>; - /***** Added in v2 *****/ + /***** Added in v2 **** */ /// Get the session info for the given session, if stored. /// @@ -164,7 +164,8 @@ pub trait RuntimeApiSubsystemClient { /// NOTE: This function is only available since parachain host version 2. async fn pvfs_require_precheck(&self, at: Hash) -> Result, ApiError>; - /// Fetch the hash of the validation code used by a para, making the given `OccupiedCoreAssumption`. + /// Fetch the hash of the validation code used by a para, making the given + /// `OccupiedCoreAssumption`. /// /// NOTE: This function is only available since parachain host version 2. 
async fn validation_code_hash( @@ -174,7 +175,7 @@ pub trait RuntimeApiSubsystemClient { assumption: OccupiedCoreAssumption, ) -> Result, ApiError>; - /***** Added in v3 *****/ + /***** Added in v3 **** */ /// Returns all onchain disputes. /// This is a staging method! Do not use on production runtimes! diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index de869bd91f12..e0b81608ff2f 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -20,7 +20,8 @@ //! or determining what their validator ID is. These common interests are factored into //! this module. //! -//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems. +//! This crate also reexports Prometheus metric types which are expected to be implemented by +//! subsystems. #![warn(missing_docs)] @@ -60,7 +61,8 @@ pub use polkadot_node_network_protocol::MIN_GOSSIP_PEERS; pub use determine_new_blocks::determine_new_blocks; -/// These reexports are required so that external crates can use the `delegated_subsystem` macro properly. +/// These reexports are required so that external crates can use the `delegated_subsystem` macro +/// properly. pub mod reexports { pub use polkadot_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext}; } @@ -367,7 +369,8 @@ pub struct Validator { } impl Validator { - /// Get a struct representing this node's validator if this node is in fact a validator in the context of the given block. + /// Get a struct representing this node's validator if this node is in fact a validator in the + /// context of the given block. pub async fn new(parent: Hash, keystore: KeystorePtr, sender: &mut S) -> Result where S: SubsystemSender, diff --git a/node/subsystem-util/src/nesting_sender.rs b/node/subsystem-util/src/nesting_sender.rs index 4417efbefb04..5d80dbf78101 100644 --- a/node/subsystem-util/src/nesting_sender.rs +++ b/node/subsystem-util/src/nesting_sender.rs @@ -33,14 +33,14 @@ //! //! This module helps with this in part. It does not break the multithreaded by default approach, //! but it breaks the `spawn everything` approach. So once you `spawn` you will still be -//! multithreaded by default, despite that for most tasks we spawn (which just wait for network or some -//! message to arrive), that is very much pointless and needless overhead. You will just spawn less in -//! the first place. +//! multithreaded by default, despite that for most tasks we spawn (which just wait for network or +//! some message to arrive), that is very much pointless and needless overhead. You will just spawn +//! less in the first place. //! //! By default your code is single threaded, except when actually needed: -//! - need to wait for long running synchronous IO (a threaded runtime is actually useful here) -//! - need to wait for some async event (message to arrive) -//! - need to do some hefty CPU bound processing (a thread is required here as well) +//! - need to wait for long running synchronous IO (a threaded runtime is actually useful here) +//! - need to wait for some async event (message to arrive) +//! - need to do some hefty CPU bound processing (a thread is required here as well) //! //! and it is not acceptable to block the main task for waiting for the result, because we actually //! really have other things to do or at least need to stay responsive just in case. @@ -48,7 +48,8 @@ //! With the types and traits in this module you can achieve exactly that: You write modules which //! 
just execute logic and can call into the functions of other modules - yes we are calling normal //! functions. For the case a module you are calling into requires an occasional background task, -//! you provide it with a `NestingSender` that it can pass to any spawned tasks. +//! you provide it with a `NestingSender` that it can pass to any spawned +//! tasks. //! //! This way you don't have to spawn a task for each module just for it to be able to handle //! asynchronous events. The module relies on the using/enclosing code/module to forward it any @@ -65,9 +66,9 @@ //! Because the wrapping is optional and transparent to the lower modules, each module can also be //! used at the top directly without any wrapping, e.g. for standalone use or for testing purposes. //! -//! Checkout the documentation of [`NestingSender`][nesting_sender::NestingSender] below for a basic usage example. For a real -//! world usage I would like to point you to the dispute-distribution subsystem which makes use of -//! this architecture. +//! Checkout the documentation of [`NestingSender`][nesting_sender::NestingSender] below for a basic +//! usage example. For a real world usage I would like to point you to the dispute-distribution +//! subsystem which makes use of this architecture. //! //! ## Limitations //! diff --git a/node/subsystem-util/src/reputation.rs b/node/subsystem-util/src/reputation.rs index 09c00bb4688a..89e3eb64df9b 100644 --- a/node/subsystem-util/src/reputation.rs +++ b/node/subsystem-util/src/reputation.rs @@ -48,7 +48,8 @@ impl ReputationAggregator { /// /// * `send_immediately_if` - A function, takes `UnifiedReputationChange`, /// results shows if we need to send the changes right away. - /// By default, it is used for sending `UnifiedReputationChange::Malicious` changes immediately and for testing. + /// By default, it is used for sending `UnifiedReputationChange::Malicious` changes immediately + /// and for testing. pub fn new(send_immediately_if: fn(UnifiedReputationChange) -> bool) -> Self { Self { by_peer: Default::default(), send_immediately_if } } diff --git a/node/test/client/src/block_builder.rs b/node/test/client/src/block_builder.rs index 88160e782a70..0987cef55c1f 100644 --- a/node/test/client/src/block_builder.rs +++ b/node/test/client/src/block_builder.rs @@ -32,15 +32,16 @@ use sp_state_machine::BasicExternalities; pub trait InitPolkadotBlockBuilder { /// Init a Polkadot specific block builder that works for the test runtime. /// - /// This will automatically create and push the inherents for you to make the block valid for the test runtime. + /// This will automatically create and push the inherents for you to make the block valid for + /// the test runtime. fn init_polkadot_block_builder( &self, ) -> sc_block_builder::BlockBuilder; /// Init a Polkadot specific block builder at a specific block that works for the test runtime. /// - /// Same as [`InitPolkadotBlockBuilder::init_polkadot_block_builder`] besides that it takes a [`BlockId`] to say - /// which should be the parent block of the block that is being build. + /// Same as [`InitPolkadotBlockBuilder::init_polkadot_block_builder`] besides that it takes a + /// [`BlockId`] to say which should be the parent block of the block that is being build. 
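// Hedged sketch of the "nesting sender" idea documented above (simplified, not
// the real `polkadot-node-subsystem-util` API): a nested module sends its own
// message type, and the enclosing module wraps the root sender so child
// messages are converted into the parent's message enum before being forwarded
// to the single root channel.
use std::sync::mpsc;

// Root message type of the enclosing module.
enum ParentMessage {
    Child(ChildMessage),
    Other,
}

// Message type private to the nested module.
#[derive(Debug)]
enum ChildMessage {
    BackgroundTaskDone(u32),
}

// A minimal nesting sender: the root channel plus a conversion function.
struct NestingSender<M> {
    root: mpsc::Sender<ParentMessage>,
    wrap: fn(M) -> ParentMessage,
}

impl<M> NestingSender<M> {
    fn new(root: mpsc::Sender<ParentMessage>, wrap: fn(M) -> ParentMessage) -> Self {
        Self { root, wrap }
    }
    fn send_message(&self, m: M) {
        let _ = self.root.send((self.wrap)(m));
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    // The nested module only ever sees a sender for `ChildMessage`.
    let child_sender = NestingSender::new(tx, ParentMessage::Child);
    child_sender.send_message(ChildMessage::BackgroundTaskDone(7));

    match rx.recv().unwrap() {
        ParentMessage::Child(m) => println!("child reported: {:?}", m),
        ParentMessage::Other => {},
    }
}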
fn init_polkadot_block_builder_at( &self, hash: ::Hash, @@ -60,7 +61,8 @@ impl InitPolkadotBlockBuilder for Client { let last_timestamp = self.runtime_api().get_last_timestamp(hash).expect("Get last timestamp"); - // `MinimumPeriod` is a storage parameter type that requires externalities to access the value. + // `MinimumPeriod` is a storage parameter type that requires externalities to access the + // value. let minimum_period = BasicExternalities::new_empty() .execute_with(|| polkadot_test_runtime::MinimumPeriod::get()); @@ -73,7 +75,8 @@ impl InitPolkadotBlockBuilder for Client { last_timestamp + minimum_period }; - // `SlotDuration` is a storage parameter type that requires externalities to access the value. + // `SlotDuration` is a storage parameter type that requires externalities to access the + // value. let slot_duration = BasicExternalities::new_empty() .execute_with(|| polkadot_test_runtime::SlotDuration::get()); @@ -130,9 +133,9 @@ impl InitPolkadotBlockBuilder for Client { pub trait BlockBuilderExt { /// Push a Polkadot test runtime specific extrinsic to the block. /// - /// This will internally use the [`BlockBuilder::push`] method, but this method expects a opaque extrinsic. So, - /// we provide this wrapper which converts a test runtime specific extrinsic to a opaque extrinsic and pushes it to - /// the block. + /// This will internally use the [`BlockBuilder::push`] method, but this method expects a opaque + /// extrinsic. So, we provide this wrapper which converts a test runtime specific extrinsic to a + /// opaque extrinsic and pushes it to the block. /// /// Returns the result of the application of the extrinsic. fn push_polkadot_extrinsic( diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index a2c1b1941003..ed25d28d2925 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -257,7 +257,8 @@ pub struct PolkadotTestNode { pub client: Arc, /// A handle to Overseer. pub overseer_handle: Handle, - /// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot node" to other nodes. + /// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot + /// node" to other nodes. pub addr: MultiaddrWithPeerId, /// `RPCHandlers` to make RPC queries. pub rpc_handlers: RpcHandlers, @@ -312,14 +313,15 @@ impl PolkadotTestNode { self.send_sudo(call, Sr25519Keyring::Alice, 1).await } - /// Wait for `count` blocks to be imported in the node and then exit. This function will not return if no blocks - /// are ever created, thus you should restrict the maximum amount of time of the test execution. + /// Wait for `count` blocks to be imported in the node and then exit. This function will not + /// return if no blocks are ever created, thus you should restrict the maximum amount of time of + /// the test execution. pub fn wait_for_blocks(&self, count: usize) -> impl Future { self.client.wait_for_blocks(count) } - /// Wait for `count` blocks to be finalized and then exit. Similarly with `wait_for_blocks` this function will - /// not return if no block are ever finalized. + /// Wait for `count` blocks to be finalized and then exit. Similarly with `wait_for_blocks` this + /// function will not return if no block are ever finalized. 
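// Hedged sketch of the inherent-timestamp rule the test block builder applies
// above (simplified; the real code reads `MinimumPeriod` and `SlotDuration`
// through externalities, and the genesis branch here is only a guess for the
// sketch): the next timestamp must advance by at least the minimum period, and
// the slot is derived from it.
fn next_timestamp(last_timestamp: u64, minimum_period: u64) -> u64 {
    if last_timestamp == 0 {
        // Genesis case in this sketch: simply start at the minimum period.
        minimum_period
    } else {
        last_timestamp + minimum_period
    }
}

fn slot_for(timestamp: u64, slot_duration: u64) -> u64 {
    timestamp / slot_duration
}

fn main() {
    let minimum_period = 3_000; // assumed value, half of an assumed 6s slot
    let slot_duration = 6_000;
    let ts = next_timestamp(12_000, minimum_period);
    assert_eq!(ts, 15_000);
    println!("timestamp {} lands in slot {}", ts, slot_for(ts, slot_duration));
}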
pub async fn wait_for_finalized_blocks(&self, count: usize) { let mut import_notification_stream = self.client.finality_notification_stream(); let mut blocks = HashSet::new(); diff --git a/parachain/src/primitives.rs b/parachain/src/primitives.rs index 18da89aa97a1..55577618c469 100644 --- a/parachain/src/primitives.rs +++ b/parachain/src/primitives.rs @@ -287,13 +287,13 @@ impl IsSystem for Sibling { } } -/// A type that uniquely identifies an HRMP channel. An HRMP channel is established between two paras. -/// In text, we use the notation `(A, B)` to specify a channel between A and B. The channels are -/// unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The convention is -/// that we use the first item tuple for the sender and the second for the recipient. Only one channel -/// is allowed between two participants in one direction, i.e. there cannot be 2 different channels -/// identified by `(A, B)`. A channel with the same para id in sender and recipient is invalid. That -/// is, however, not enforced. +/// A type that uniquely identifies an HRMP channel. An HRMP channel is established between two +/// paras. In text, we use the notation `(A, B)` to specify a channel between A and B. The channels +/// are unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The +/// convention is that we use the first item tuple for the sender and the second for the recipient. +/// Only one channel is allowed between two participants in one direction, i.e. there cannot be 2 +/// different channels identified by `(A, B)`. A channel with the same para id in sender and +/// recipient is invalid. That is, however, not enforced. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Hash))] pub struct HrmpChannelId { @@ -414,6 +414,7 @@ pub struct ValidationResult { /// /// It is expected that the Parachain processes them from first to last. pub processed_downward_messages: u32, - /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + /// The mark which specifies the block number up to which all inbound HRMP messages are + /// processed. pub hrmp_watermark: RelayChainBlockNumber, } diff --git a/parachain/test-parachains/adder/collator/src/lib.rs b/parachain/test-parachains/adder/collator/src/lib.rs index 02a4598f9e47..1ac561dda2ba 100644 --- a/parachain/test-parachains/adder/collator/src/lib.rs +++ b/parachain/test-parachains/adder/collator/src/lib.rs @@ -147,7 +147,8 @@ impl Collator { /// Create the collation function. /// - /// This collation function can be plugged into the overseer to generate collations for the adder parachain. + /// This collation function can be plugged into the overseer to generate collations for the + /// adder parachain. pub fn create_collation_function( &self, spawner: impl SpawnNamed + Clone + 'static, @@ -228,8 +229,9 @@ impl Collator { /// Wait until `seconded` collations of this collator are seconded by a parachain validator. /// - /// The internal counter isn't de-duplicating the collations when counting the number of seconded collations. This - /// means when one collation is seconded by X validators, we record X seconded messages. + /// The internal counter isn't de-duplicating the collations when counting the number of + /// seconded collations. This means when one collation is seconded by X validators, we record X + /// seconded messages. 
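// Hedged sketch of the HRMP channel identifier described above (simplified
// types; the real `HrmpChannelId` lives in the polkadot-parachain primitives):
// a channel is the ordered pair (sender, recipient), so `(A, B)` and `(B, A)`
// are distinct channels, and a channel with equal endpoints is invalid even
// though the type itself does not forbid it.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ParaId(u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct HrmpChannelId {
    sender: ParaId,
    recipient: ParaId,
}

impl HrmpChannelId {
    // Hypothetical helper for the "same para id in sender and recipient" case.
    fn is_loopback(&self) -> bool {
        self.sender == self.recipient
    }
}

fn main() {
    let a_to_b = HrmpChannelId { sender: ParaId(1000), recipient: ParaId(2000) };
    let b_to_a = HrmpChannelId { sender: ParaId(2000), recipient: ParaId(1000) };
    assert_ne!(a_to_b, b_to_a); // channels are unidirectional
    assert!(!a_to_b.is_loopback());
}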
pub async fn wait_for_seconded_collations(&self, seconded: u32) { let seconded_collations = self.seconded_collations.clone(); loop { diff --git a/parachain/test-parachains/adder/collator/tests/integration.rs b/parachain/test-parachains/adder/collator/tests/integration.rs index 9ab1c0c337a6..b891b29db59c 100644 --- a/parachain/test-parachains/adder/collator/tests/integration.rs +++ b/parachain/test-parachains/adder/collator/tests/integration.rs @@ -19,7 +19,8 @@ const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_adder_collator_puppet_worker"); -// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled. +// If this test is failing, make sure to run all tests with the `real-overseer` feature being +// enabled. #[substrate_test_utils::test(flavor = "multi_thread")] async fn collating_using_adder_collator() { diff --git a/parachain/test-parachains/undying/collator/src/lib.rs b/parachain/test-parachains/undying/collator/src/lib.rs index 838590fa16f5..cc0f592dc253 100644 --- a/parachain/test-parachains/undying/collator/src/lib.rs +++ b/parachain/test-parachains/undying/collator/src/lib.rs @@ -221,7 +221,8 @@ impl Collator { /// Create the collation function. /// - /// This collation function can be plugged into the overseer to generate collations for the undying parachain. + /// This collation function can be plugged into the overseer to generate collations for the + /// undying parachain. pub fn create_collation_function( &self, spawner: impl SpawnNamed + Clone + 'static, @@ -309,8 +310,9 @@ impl Collator { /// Wait until `seconded` collations of this collator are seconded by a parachain validator. /// - /// The internal counter isn't de-duplicating the collations when counting the number of seconded collations. This - /// means when one collation is seconded by X validators, we record X seconded messages. + /// The internal counter isn't de-duplicating the collations when counting the number of + /// seconded collations. This means when one collation is seconded by X validators, we record X + /// seconded messages. pub async fn wait_for_seconded_collations(&self, seconded: u32) { let seconded_collations = self.seconded_collations.clone(); loop { diff --git a/parachain/test-parachains/undying/collator/tests/integration.rs b/parachain/test-parachains/undying/collator/tests/integration.rs index 8ca6eec9aa62..21d174fb06c7 100644 --- a/parachain/test-parachains/undying/collator/tests/integration.rs +++ b/parachain/test-parachains/undying/collator/tests/integration.rs @@ -19,7 +19,8 @@ const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_undying_collator_puppet_worker"); -// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled. +// If this test is failing, make sure to run all tests with the `real-overseer` feature being +// enabled. #[substrate_test_utils::test(flavor = "multi_thread")] async fn collating_using_undying_collator() { use polkadot_primitives::Id as ParaId; diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs index ec05beea9d5f..c3a150a642e0 100644 --- a/primitives/src/runtime_api.rs +++ b/primitives/src/runtime_api.rs @@ -30,10 +30,9 @@ //! The versioning is achieved with the `api_version` attribute. It can be //! placed on: //! * trait declaration - represents the base version of the API. -//! * method declaration (inside a trait declaration) - represents a versioned -//! method, which is not available in the base version. -//! 
* trait implementation - represents which version of the API is being -//! implemented. +//! * method declaration (inside a trait declaration) - represents a versioned method, which is not +//! available in the base version. +//! * trait implementation - represents which version of the API is being implemented. //! //! Let's see a quick example: //! @@ -90,14 +89,14 @@ //! # How versioned methods are used for `ParachainHost` //! //! Let's introduce two types of `ParachainHost` API implementation: -//! * stable - used on stable production networks like Polkadot and Kusama. There is only one -//! stable API at a single point in time. +//! * stable - used on stable production networks like Polkadot and Kusama. There is only one stable +//! API at a single point in time. //! * staging - methods that are ready for production, but will be released on Rococo first. We can //! batch together multiple changes and then release all of them to production, by making staging //! production (bump base version). We can not change or remove any method in staging after a -//! release, as this would break Rococo. It should be ok to keep adding methods to staging -//! across several releases. For experimental methods, you have to keep them on a separate branch -//! until ready. +//! release, as this would break Rococo. It should be ok to keep adding methods to staging across +//! several releases. For experimental methods, you have to keep them on a separate branch until +//! ready. //! //! The stable version of `ParachainHost` is indicated by the base version of the API. Any staging //! method must use `api_version` attribute so that it is assigned to a specific version of a @@ -111,8 +110,8 @@ //! ``` //! indicates a function from the stable `v2` API. //! -//! All staging API functions should use primitives from `vstaging`. They should be clearly separated -//! from the stable primitives. +//! All staging API functions should use primitives from `vstaging`. They should be clearly +//! separated from the stable primitives. use crate::{ vstaging, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, diff --git a/primitives/src/v5/metrics.rs b/primitives/src/v5/metrics.rs index f947c7392dcb..97f7678e4373 100644 --- a/primitives/src/v5/metrics.rs +++ b/primitives/src/v5/metrics.rs @@ -164,8 +164,8 @@ pub mod metric_definitions { }; /// Counts the number of `imported`, `current` and `concluded_invalid` dispute statements sets - /// processed in `process_inherent_data`. The `current` label refers to the disputes statement sets of - /// the current session. + /// processed in `process_inherent_data`. The `current` label refers to the disputes statement + /// sets of the current session. pub const PARACHAIN_INHERENT_DATA_DISPUTE_SETS_PROCESSED: CounterVecDefinition = CounterVecDefinition { name: "polkadot_parachain_inherent_data_dispute_sets_processed", @@ -174,7 +174,8 @@ pub mod metric_definitions { labels: &["category"], }; - /// Counts the number of `valid` and `invalid` bitfields signature checked in `process_inherent_data`. + /// Counts the number of `valid` and `invalid` bitfields signature checked in + /// `process_inherent_data`. 
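// Hedged sketch (not the real runtime API client) of how a caller might gate a
// versioned method, in the spirit of the `api_version` scheme described above:
// a method "added in v3" is only invoked when the reported API version is high
// enough, otherwise the caller falls back. `disputes_v3` is an assumed
// stand-in name.
struct RuntimeApi {
    api_version: u32,
}

impl RuntimeApi {
    fn disputes_v3(&self) -> Vec<&'static str> {
        vec!["dispute-a", "dispute-b"]
    }
}

fn fetch_disputes(api: &RuntimeApi) -> Option<Vec<&'static str>> {
    if api.api_version >= 3 {
        Some(api.disputes_v3())
    } else {
        // Older runtimes simply do not expose the call.
        None
    }
}

fn main() {
    assert_eq!(fetch_disputes(&RuntimeApi { api_version: 2 }), None);
    assert!(fetch_disputes(&RuntimeApi { api_version: 3 }).is_some());
}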
pub const PARACHAIN_CREATE_INHERENT_BITFIELDS_SIGNATURE_CHECKS: CounterVecDefinition = CounterVecDefinition { name: "polkadot_parachain_create_inherent_bitfields_signature_checks", @@ -183,7 +184,8 @@ pub mod metric_definitions { labels: &["validity"], }; - /// Measures how much time does it take to verify a single validator signature of a dispute statement + /// Measures how much time does it take to verify a single validator signature of a dispute + /// statement pub const PARACHAIN_VERIFY_DISPUTE_SIGNATURE: HistogramDefinition = HistogramDefinition { name: "polkadot_parachain_verify_dispute_signature", diff --git a/primitives/src/v5/mod.rs b/primitives/src/v5/mod.rs index 3498c0762d4c..bdd10e623190 100644 --- a/primitives/src/v5/mod.rs +++ b/primitives/src/v5/mod.rs @@ -103,7 +103,8 @@ pub trait TypeIndex { fn type_index(&self) -> usize; } -/// Index of the validator is used as a lightweight replacement of the `ValidatorId` when appropriate. +/// Index of the validator is used as a lightweight replacement of the `ValidatorId` when +/// appropriate. #[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))] pub struct ValidatorIndex(pub u32); @@ -589,25 +590,27 @@ impl Ord for CommittedCandidateReceipt { } } -/// The validation data provides information about how to create the inputs for validation of a candidate. -/// This information is derived from the chain state and will vary from para to para, although some -/// fields may be the same for every para. +/// The validation data provides information about how to create the inputs for validation of a +/// candidate. This information is derived from the chain state and will vary from para to para, +/// although some fields may be the same for every para. /// -/// Since this data is used to form inputs to the validation function, it needs to be persisted by the -/// availability system to avoid dependence on availability of the relay-chain state. +/// Since this data is used to form inputs to the validation function, it needs to be persisted by +/// the availability system to avoid dependence on availability of the relay-chain state. /// -/// Furthermore, the validation data acts as a way to authorize the additional data the collator needs -/// to pass to the validation function. For example, the validation function can check whether the incoming -/// messages (e.g. downward messages) were actually sent by using the data provided in the validation data -/// using so called MQC heads. +/// Furthermore, the validation data acts as a way to authorize the additional data the collator +/// needs to pass to the validation function. For example, the validation function can check whether +/// the incoming messages (e.g. downward messages) were actually sent by using the data provided in +/// the validation data using so called MQC heads. /// -/// Since the commitments of the validation function are checked by the relay-chain, secondary checkers -/// can rely on the invariant that the relay-chain only includes para-blocks for which these checks have -/// already been done. As such, there is no need for the validation data used to inform validators and -/// collators about the checks the relay-chain will perform to be persisted by the availability system. 
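// Hedged sketch of the bitfield-signature-check counter defined above, using
// the plain `prometheus` crate rather than the node's own metrics plumbing;
// only the metric name and the `validity` label mirror the definition in the
// text, everything else is a simplified stand-in.
use prometheus::{CounterVec, Opts};

fn main() {
    let checks = CounterVec::new(
        Opts::new(
            "polkadot_parachain_create_inherent_bitfields_signature_checks",
            "Counts valid and invalid bitfield signature checks in process_inherent_data",
        ),
        &["validity"],
    )
    .expect("valid metric definition");

    // Record one successful and one failed signature check.
    checks.with_label_values(&["valid"]).inc();
    checks.with_label_values(&["invalid"]).inc();

    assert_eq!(checks.with_label_values(&["valid"]).get(), 1.0);
}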
+/// Since the commitments of the validation function are checked by the relay-chain, secondary +/// checkers can rely on the invariant that the relay-chain only includes para-blocks for which +/// these checks have already been done. As such, there is no need for the validation data used to +/// inform validators and collators about the checks the relay-chain will perform to be persisted by +/// the availability system. /// -/// The `PersistedValidationData` should be relatively lightweight primarily because it is constructed -/// during inclusion for each candidate and therefore lies on the critical path of inclusion. +/// The `PersistedValidationData` should be relatively lightweight primarily because it is +/// constructed during inclusion for each candidate and therefore lies on the critical path of +/// inclusion. #[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Default))] pub struct PersistedValidationData { @@ -642,7 +645,8 @@ pub struct CandidateCommitments { pub head_data: HeadData, /// The number of messages processed from the DMQ. pub processed_downward_messages: u32, - /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + /// The mark which specifies the block number up to which all inbound HRMP messages are + /// processed. pub hrmp_watermark: N, } @@ -677,7 +681,8 @@ pub type UncheckedSignedAvailabilityBitfield = UncheckedSigned; -/// A set of unchecked signed availability bitfields. Should be sorted by validator index, ascending. +/// A set of unchecked signed availability bitfields. Should be sorted by validator index, +/// ascending. pub type UncheckedSignedAvailabilityBitfields = Vec; /// A backed (or backable, depending on context) candidate. @@ -975,8 +980,9 @@ pub enum CoreState { /// variant. #[codec(index = 1)] Scheduled(ScheduledCore), - /// The core is currently free and there is nothing scheduled. This can be the case for parathread - /// cores when there are no parathread blocks queued. Parachain cores will never be left idle. + /// The core is currently free and there is nothing scheduled. This can be the case for + /// parathread cores when there are no parathread blocks queued. Parachain cores will never be + /// left idle. #[codec(index = 2)] Free, } @@ -1079,8 +1085,8 @@ impl From for u8 { } } -/// Abridged version of `HostConfiguration` (from the `Configuration` parachains host runtime module) -/// meant to be used by a parachain or PDK such as cumulus. +/// Abridged version of `HostConfiguration` (from the `Configuration` parachains host runtime +/// module) meant to be used by a parachain or PDK such as cumulus. #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct AbridgedHostConfiguration { @@ -1156,17 +1162,18 @@ pub enum UpgradeRestriction { #[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] pub enum UpgradeGoAhead { /// Abort the upgrade process. There is something wrong with the validation code previously - /// submitted by the parachain. This variant can also be used to prevent upgrades by the governance - /// should an emergency emerge. + /// submitted by the parachain. This variant can also be used to prevent upgrades by the + /// governance should an emergency emerge. /// /// The expected reaction on this variant is that the parachain will admit this message and /// remove all the data about the pending upgrade. 
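// Hedged sketch of the HRMP watermark semantics mentioned above for
// `CandidateCommitments::hrmp_watermark` (simplified queue representation):
// inbound HRMP messages sent at or below the watermark relay-chain block
// number count as processed and can be pruned.
fn prune_up_to_watermark(queue: &mut Vec<(u32, Vec<u8>)>, hrmp_watermark: u32) {
    // Each entry is (sent_at_relay_block, message_payload).
    queue.retain(|(sent_at, _)| *sent_at > hrmp_watermark);
}

fn main() {
    let mut queue = vec![(5, vec![0xaa]), (8, vec![0xbb]), (12, vec![0xcc])];
    prune_up_to_watermark(&mut queue, 8);
    assert_eq!(queue.len(), 1);
    assert_eq!(queue[0].0, 12);
}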
Depending on the nature of the problem (to - /// be examined offchain for now), it can try to send another validation code or just retry later. + /// be examined offchain for now), it can try to send another validation code or just retry + /// later. #[codec(index = 0)] Abort, - /// Apply the pending code change. The parablock that is built on a relay-parent that is descendant - /// of the relay-parent where the parachain observed this signal must use the upgraded validation - /// code. + /// Apply the pending code change. The parablock that is built on a relay-parent that is + /// descendant of the relay-parent where the parachain observed this signal must use the + /// upgraded validation code. #[codec(index = 1)] GoAhead, } @@ -1646,7 +1653,7 @@ pub const fn supermajority_threshold(n: usize) -> usize { #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct SessionInfo { - /****** New in v2 *******/ + /****** New in v2 ****** */ /// All the validators actively participating in parachain consensus. /// Indices are into the broader validator set. pub active_validator_indices: Vec, @@ -1655,11 +1662,11 @@ pub struct SessionInfo { /// The amount of sessions to keep for disputes. pub dispute_period: SessionIndex, - /****** Old fields ******/ + /****** Old fields ***** */ /// Validators in canonical ordering. /// - /// NOTE: There might be more authorities in the current session, than `validators` participating - /// in parachain consensus. See + /// NOTE: There might be more authorities in the current session, than `validators` + /// participating in parachain consensus. See /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148). /// /// `SessionInfo::validators` will be limited to to `max_validators` when set. @@ -1667,8 +1674,8 @@ pub struct SessionInfo { /// Validators' authority discovery keys for the session in canonical ordering. /// /// NOTE: The first `validators.len()` entries will match the corresponding validators in - /// `validators`, afterwards any remaining authorities can be found. This is any authorities not - /// participating in parachain consensus - see + /// `validators`, afterwards any remaining authorities can be found. This is any authorities + /// not participating in parachain consensus - see /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148) pub discovery_keys: Vec, /// The assignment keys for validators. @@ -1679,8 +1686,8 @@ pub struct SessionInfo { /// /// Therefore: /// ```ignore - /// assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len() - /// ``` + /// assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len() + /// ``` pub assignment_keys: Vec, /// Validators in shuffled ordering - these are the validator groups as produced /// by the `Scheduler` module for the session and are typically referred to by diff --git a/primitives/test-helpers/src/lib.rs b/primitives/test-helpers/src/lib.rs index ac7af5b5fa7d..a8fc0f7ccc26 100644 --- a/primitives/test-helpers/src/lib.rs +++ b/primitives/test-helpers/src/lib.rs @@ -17,7 +17,8 @@ #![forbid(unused_crate_dependencies)] #![forbid(unused_extern_crates)] -//! A set of primitive constructors, to aid in crafting meaningful testcase while reducing repetition. +//! 
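// Hedged sketch of how a parachain might react to the `UpgradeGoAhead` signal
// documented above (simplified in-memory state; real handling lives in the
// parachain runtime / Cumulus): `Abort` drops the pending code, `GoAhead`
// switches to it for blocks built on descendant relay parents.
enum UpgradeGoAhead {
    Abort,
    GoAhead,
}

struct ParachainState {
    current_code: Vec<u8>,
    pending_code: Option<Vec<u8>>,
}

fn on_go_ahead_signal(state: &mut ParachainState, signal: UpgradeGoAhead) {
    match signal {
        // Something was wrong with the submitted code: forget it and retry later.
        UpgradeGoAhead::Abort => state.pending_code = None,
        // Subsequent parablocks must use the upgraded validation code.
        UpgradeGoAhead::GoAhead => {
            if let Some(new_code) = state.pending_code.take() {
                state.current_code = new_code;
            }
        },
    }
}

fn main() {
    let mut state = ParachainState { current_code: vec![1], pending_code: Some(vec![2]) };
    on_go_ahead_signal(&mut state, UpgradeGoAhead::GoAhead);
    assert_eq!(state.current_code, vec![2]);
    assert!(state.pending_code.is_none());
}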
A set of primitive constructors, to aid in crafting meaningful testcase while reducing +//! repetition. //! //! Note that `dummy_` prefixed values are meant to be fillers, that should not matter, and will //! contain randomness based data. diff --git a/runtime/common/slot_range_helper/src/lib.rs b/runtime/common/slot_range_helper/src/lib.rs index 626232032fbd..bbe5b61ae1f3 100644 --- a/runtime/common/slot_range_helper/src/lib.rs +++ b/runtime/common/slot_range_helper/src/lib.rs @@ -36,15 +36,15 @@ pub use sp_std::{ops::Add, result}; /// /// This will generate an enum `SlotRange` with the following properties: /// -/// * Enum variants will range from all consecutive combinations of inputs, i.e. -/// `ZeroZero`, `ZeroOne`, `ZeroTwo`, `ZeroThree`, `OneOne`, `OneTwo`, `OneThree`... +/// * Enum variants will range from all consecutive combinations of inputs, i.e. `ZeroZero`, +/// `ZeroOne`, `ZeroTwo`, `ZeroThree`, `OneOne`, `OneTwo`, `OneThree`... /// * A constant `LEASE_PERIODS_PER_SLOT` will count the number of lease periods. /// * A constant `SLOT_RANGE_COUNT` will count the total number of enum variants. /// * A function `as_pair` will return a tuple representation of the `SlotRange`. /// * A function `intersects` will tell you if two slot ranges intersect with one another. /// * A function `len` will tell you the length of occupying a `SlotRange`. -/// * A function `new_bounded` will generate a `SlotRange` from an input of the current -/// lease period, the starting lease period, and the final lease period. +/// * A function `new_bounded` will generate a `SlotRange` from an input of the current lease +/// period, the starting lease period, and the final lease period. #[macro_export] macro_rules! generate_slot_range{ // Entry point diff --git a/runtime/common/src/assigned_slots.rs b/runtime/common/src/assigned_slots.rs index 4424738c9835..b3c1381c9ec9 100644 --- a/runtime/common/src/assigned_slots.rs +++ b/runtime/common/src/assigned_slots.rs @@ -322,7 +322,8 @@ pub mod pallet { }, Err(err) => { // Treat failed lease creation as warning .. slot will be allocated a lease - // in a subsequent lease period by the `allocate_temporary_slot_leases` function. + // in a subsequent lease period by the `allocate_temporary_slot_leases` + // function. log::warn!(target: "assigned_slots", "Failed to allocate a temp slot for para {:?} at period {:?}: {:?}", id, current_lease_period, err @@ -398,7 +399,8 @@ impl Pallet { /// total number of lease (lower first), and then when they last a turn (older ones first). /// If any remaining ex-aequo, we just take the para ID in ascending order as discriminator. /// - /// Assigned slots with a `period_begin` bigger than current lease period are not considered (yet). + /// Assigned slots with a `period_begin` bigger than current lease period are not considered + /// (yet). /// /// The function will call out to `Leaser::lease_out` to create the appropriate slot leases. fn allocate_temporary_slot_leases(lease_period_index: LeasePeriodOf) -> DispatchResult { @@ -525,7 +527,8 @@ impl Pallet { /// Handles start of a lease period. fn manage_lease_period_start(lease_period_index: LeasePeriodOf) -> Weight { - // Note: leases that have ended in previous lease period, should have been cleaned in slots pallet. + // Note: leases that have ended in previous lease period, should have been cleaned in slots + // pallet. 
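// Hedged sketch of what the `generate_slot_range!` macro documented above
// provides, using a plain (first, last) pair instead of the generated enum
// variants: `len`, `intersects`, `as_pair` and a bounded constructor. The
// exact bounds checking of the real `new_bounded` is simplified here.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct SlotRange {
    first: u32,
    last: u32, // inclusive
}

impl SlotRange {
    fn new_bounded(current: u32, first: u32, last: u32) -> Option<Self> {
        // Ranges must start no earlier than the current lease period and be ordered.
        if first < current || last < first {
            return None;
        }
        Some(Self { first, last })
    }
    fn len(&self) -> u32 {
        self.last - self.first + 1
    }
    fn intersects(&self, other: &Self) -> bool {
        self.first <= other.last && other.first <= self.last
    }
    fn as_pair(&self) -> (u32, u32) {
        (self.first, self.last)
    }
}

fn main() {
    let a = SlotRange::new_bounded(0, 0, 3).unwrap();
    let b = SlotRange::new_bounded(0, 2, 5).unwrap();
    assert_eq!(a.len(), 4);
    assert!(a.intersects(&b));
    assert_eq!(b.as_pair(), (2, 5));
}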
if let Err(err) = Self::allocate_temporary_slot_leases(lease_period_index) { log::error!(target: "assigned_slots", "Allocating slots failed for lease period {:?}, with: {:?}", diff --git a/runtime/common/src/auctions.rs b/runtime/common/src/auctions.rs index 7ab12eec7998..901c9c27da28 100644 --- a/runtime/common/src/auctions.rs +++ b/runtime/common/src/auctions.rs @@ -138,8 +138,8 @@ pub mod pallet { Reserved { bidder: T::AccountId, extra_reserved: BalanceOf, total_amount: BalanceOf }, /// Funds were unreserved since bidder is no longer active. `[bidder, amount]` Unreserved { bidder: T::AccountId, amount: BalanceOf }, - /// Someone attempted to lease the same slot twice for a parachain. The amount is held in reserve - /// but no parachain slot has been leased. + /// Someone attempted to lease the same slot twice for a parachain. The amount is held in + /// reserve but no parachain slot has been leased. ReserveConfiscated { para_id: ParaId, leaser: T::AccountId, amount: BalanceOf }, /// A new bid has been accepted as the current winner. BidAccepted { @@ -149,7 +149,8 @@ pub mod pallet { first_slot: LeasePeriodOf, last_slot: LeasePeriodOf, }, - /// The winning offset was chosen for an auction. This will map into the `Winning` storage map. + /// The winning offset was chosen for an auction. This will map into the `Winning` storage + /// map. WinningOffset { auction_index: AuctionIndex, block_number: BlockNumberFor }, } @@ -217,9 +218,9 @@ pub mod pallet { fn on_initialize(n: BlockNumberFor) -> Weight { let mut weight = T::DbWeight::get().reads(1); - // If the current auction was in its ending period last block, then ensure that the (sub-)range - // winner information is duplicated from the previous block in case no bids happened in the - // last block. + // If the current auction was in its ending period last block, then ensure that the + // (sub-)range winner information is duplicated from the previous block in case no bids + // happened in the last block. if let AuctionStatus::EndingPeriod(offset, _sub_sample) = Self::auction_status(n) { weight = weight.saturating_add(T::DbWeight::get().reads(1)); if !Winning::::contains_key(&offset) { @@ -555,8 +556,9 @@ impl Pallet { }); let res = Winning::::get(offset) .unwrap_or([Self::EMPTY; SlotRange::SLOT_RANGE_COUNT]); - // This `remove_all` statement should remove at most `EndingPeriod` / `SampleLength` items, - // which should be bounded and sensibly configured in the runtime. + // This `remove_all` statement should remove at most `EndingPeriod` / + // `SampleLength` items, which should be bounded and sensibly configured in the + // runtime. #[allow(deprecated)] Winning::::remove_all(None); AuctionInfo::::kill(); @@ -574,8 +576,8 @@ impl Pallet { auction_lease_period_index: LeasePeriodOf, winning_ranges: WinningData, ) { - // First, unreserve all amounts that were reserved for the bids. We will later re-reserve the - // amounts from the bidders that ended up being assigned the slot so there's no need to + // First, unreserve all amounts that were reserved for the bids. We will later re-reserve + // the amounts from the bidders that ended up being assigned the slot so there's no need to // special-case them here. 
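// Hedged sketch of the ending-period bookkeeping described above for the
// auctions pallet (storage simplified to a Vec): if no bid arrived during the
// current sample, the (sub-)range winner information is carried over from the
// previous sample so later samples always have a complete snapshot.
type Winning = Option<Vec<u128>>; // winning amounts per slot range, simplified

fn carry_over_winning(winning_by_offset: &mut Vec<Winning>, offset: usize) {
    if offset > 0 && winning_by_offset[offset].is_none() {
        let previous = winning_by_offset[offset - 1].clone();
        winning_by_offset[offset] = previous;
    }
}

fn main() {
    // Sample 0 had bids, sample 1 had none.
    let mut winning = vec![Some(vec![10, 0, 0]), None, None];
    carry_over_winning(&mut winning, 1);
    assert_eq!(winning[1], Some(vec![10, 0, 0]));
}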
for ((bidder, _), amount) in ReservedAmounts::::drain() { CurrencyOf::::unreserve(&bidder, amount); @@ -596,12 +598,12 @@ impl Pallet { Err(LeaseError::ReserveFailed) | Err(LeaseError::AlreadyEnded) | Err(LeaseError::NoLeasePeriod) => { - // Should never happen since we just unreserved this amount (and our offset is from the - // present period). But if it does, there's not much we can do. + // Should never happen since we just unreserved this amount (and our offset is + // from the present period). But if it does, there's not much we can do. }, Err(LeaseError::AlreadyLeased) => { - // The leaser attempted to get a second lease on the same para ID, possibly griefing us. Let's - // keep the amount reserved and let governance sort it out. + // The leaser attempted to get a second lease on the same para ID, possibly + // griefing us. Let's keep the amount reserved and let governance sort it out. if CurrencyOf::::reserve(&leaser, amount).is_ok() { Self::deposit_event(Event::::ReserveConfiscated { para_id: para, @@ -1123,11 +1125,11 @@ mod tests { Auctions::auction_status(System::block_number()), AuctionStatus::::EndingPeriod(2, 0) ); - // This will prevent the auction's winner from being decided in the next block, since the random - // seed was known before the final bids were made. + // This will prevent the auction's winner from being decided in the next block, since + // the random seed was known before the final bids were made. set_last_random(H256::zero(), 8); - // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet since - // no randomness available yet. + // Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet + // since no randomness available yet. run_to_block(9); // Auction has now ended... But auction winner still not yet decided, so no leases yet. assert_eq!( @@ -1136,8 +1138,8 @@ mod tests { ); assert_eq!(leases(), vec![]); - // Random seed now updated to a value known at block 9, when the auction ended. This means - // that the winner can now be chosen. + // Random seed now updated to a value known at block 9, when the auction ended. This + // means that the winner can now be chosen. set_last_random(H256::zero(), 9); run_to_block(10); // Auction ended and winner selected diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index 6a41a8f3f472..9cc06b2bede2 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -193,8 +193,8 @@ pub mod pallet { SignerHasNoClaim, /// Account ID sending transaction has no claim. SenderHasNoClaim, - /// There's not enough in the pot to pay out some unvested amount. Generally implies a logic - /// error. + /// There's not enough in the pot to pay out some unvested amount. Generally implies a + /// logic error. PotUnderflow, /// A needed statement was not included. InvalidStatement, @@ -288,8 +288,8 @@ pub mod pallet { /// /// Parameters: /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message - /// matching the format described above. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. /// /// /// The weight of this call is invariant over the input parameters. @@ -368,9 +368,10 @@ pub mod pallet { /// /// Parameters: /// - `dest`: The destination account to payout the claim. - /// - `ethereum_signature`: The signature of an ethereum signed message - /// matching the format described above. 
- /// - `statement`: The identity of the statement which is being attested to in the signature. + /// - `ethereum_signature`: The signature of an ethereum signed message matching the format + /// described above. + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. /// /// /// The weight of this call is invariant over the input parameters. @@ -400,14 +401,16 @@ pub mod pallet { /// Attest to a statement, needed to finalize the claims process. /// - /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a `SignedExtension`. + /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a + /// `SignedExtension`. /// /// Unsigned Validation: /// A call to attest is deemed valid if the sender has a `Preclaim` registered /// and provides a `statement` which is expected for the account. /// /// Parameters: - /// - `statement`: The identity of the statement which is being attested to in the signature. + /// - `statement`: The identity of the statement which is being attested to in the + /// signature. /// /// /// The weight of this call is invariant over the input parameters. diff --git a/runtime/common/src/crowdloan/migration.rs b/runtime/common/src/crowdloan/migration.rs index 4a47f3283de3..03c4ab6c3119 100644 --- a/runtime/common/src/crowdloan/migration.rs +++ b/runtime/common/src/crowdloan/migration.rs @@ -134,8 +134,8 @@ pub mod crowdloan_index_migration { Ok(()) } - /// This migration converts crowdloans to use a crowdloan index rather than the parachain id as a - /// unique identifier. This makes it easier to swap two crowdloans between parachains. + /// This migration converts crowdloans to use a crowdloan index rather than the parachain id as + /// a unique identifier. This makes it easier to swap two crowdloans between parachains. pub fn migrate() -> frame_support::weights::Weight { let mut weight = Weight::zero(); diff --git a/runtime/common/src/crowdloan/mod.rs b/runtime/common/src/crowdloan/mod.rs index 18c86e68e5df..1db046c52701 100644 --- a/runtime/common/src/crowdloan/mod.rs +++ b/runtime/common/src/crowdloan/mod.rs @@ -45,9 +45,9 @@ //! slot auction enters its ending period, then parachains will each place a bid; the bid will be //! raised once per block if the parachain had additional funds contributed since the last bid. //! -//! Successful funds remain tracked (in the `Funds` storage item and the associated child trie) as long as -//! the parachain remains active. Users can withdraw their funds once the slot is completed and funds are -//! returned to the crowdloan account. +//! Successful funds remain tracked (in the `Funds` storage item and the associated child trie) as +//! long as the parachain remains active. Users can withdraw their funds once the slot is completed +//! and funds are returned to the crowdloan account. pub mod migration; @@ -164,11 +164,11 @@ pub struct FundInfo { /// If this is `Ending(n)`, this fund received a contribution during the current ending period, /// where `n` is how far into the ending period the contribution was made. pub last_contribution: LastContribution, - /// First lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type - /// as `BlockNumber`. + /// First lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same + /// type as `BlockNumber`. pub first_period: LeasePeriod, - /// Last lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same type - /// as `BlockNumber`. 
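// Hedged sketch of the identifier change described by the crowdloan index
// migration above (simplified in-memory types): contributions are keyed by a
// stable `FundIndex` instead of the `ParaId`, so two crowdloans can swap
// parachains without re-keying their contributor data.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct FundIndex(u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ParaId(u32);

struct Fund {
    index: FundIndex,
    para: ParaId,
}

// Swapping the paras of two funds leaves the contribution keys (fund indices) untouched.
fn swap_paras(a: &mut Fund, b: &mut Fund) {
    std::mem::swap(&mut a.para, &mut b.para);
}

fn main() {
    let mut a = Fund { index: FundIndex(0), para: ParaId(2000) };
    let mut b = Fund { index: FundIndex(1), para: ParaId(3000) };
    swap_paras(&mut a, &mut b);
    assert_eq!(a.para, ParaId(3000));
    assert_eq!(a.index, FundIndex(0)); // contributions still found under index 0
}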
+ /// Last lease period in range to bid on; it's actually a `LeasePeriod`, but that's the same + /// type as `BlockNumber`. pub last_period: LeasePeriod, /// Unique index used to represent this fund. pub fund_index: FundIndex, @@ -192,15 +192,16 @@ pub mod pallet { pub trait Config: frame_system::Config { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// `PalletId` for the crowdloan pallet. An appropriate value could be `PalletId(*b"py/cfund")` + /// `PalletId` for the crowdloan pallet. An appropriate value could be + /// `PalletId(*b"py/cfund")` #[pallet::constant] type PalletId: Get; /// The amount to be held on deposit by the depositor of a crowdloan. type SubmissionDeposit: Get>; - /// The minimum amount that may be contributed into a crowdloan. Should almost certainly be at - /// least `ExistentialDeposit`. + /// The minimum amount that may be contributed into a crowdloan. Should almost certainly be + /// at least `ExistentialDeposit`. #[pallet::constant] type MinContribution: Get>; @@ -208,8 +209,8 @@ pub mod pallet { #[pallet::constant] type RemoveKeysLimit: Get; - /// The parachain registrar type. We just use this to ensure that only the manager of a para is able to - /// start a crowdloan for its slot. + /// The parachain registrar type. We just use this to ensure that only the manager of a para + /// is able to start a crowdloan for its slot. type Registrar: Registrar; /// The type representing the auctioning system. @@ -314,7 +315,8 @@ pub mod pallet { FundNotEnded, /// There are no contributions stored in this crowdloan. NoContributions, - /// The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement period. + /// The crowdloan is not ready to dissolve. Potentially still has a slot or in retirement + /// period. NotReadyToDissolve, /// Invalid signature. InvalidSignature, @@ -342,8 +344,9 @@ pub mod pallet { for (fund, para_id) in new_raise.into_iter().filter_map(|i| Self::funds(i).map(|f| (f, i))) { - // Care needs to be taken by the crowdloan creator that this function will succeed given - // the crowdloaning configuration. We do some checks ahead of time in crowdloan `create`. + // Care needs to be taken by the crowdloan creator that this function will + // succeed given the crowdloaning configuration. We do some checks ahead of time + // in crowdloan `create`. let result = T::Auctioneer::place_bid( Self::fund_account_id(fund.fund_index), para_id, @@ -363,7 +366,8 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// Create a new crowdloaning campaign for a parachain slot with the given lease period range. + /// Create a new crowdloaning campaign for a parachain slot with the given lease period + /// range. /// /// This applies a lock to your parachain configuration, ensuring that it cannot be changed /// by the parachain manager. @@ -462,16 +466,16 @@ pub mod pallet { /// /// Origin must be signed, but can come from anyone. /// - /// The fund must be either in, or ready for, retirement. For a fund to be *in* retirement, then the retirement - /// flag must be set. For a fund to be ready for retirement, then: + /// The fund must be either in, or ready for, retirement. For a fund to be *in* retirement, + /// then the retirement flag must be set. 
For a fund to be ready for retirement, then: /// - it must not already be in retirement; /// - the amount of raised funds must be bigger than the _free_ balance of the account; /// - and either: /// - the block number must be at least `end`; or /// - the current lease period must be greater than the fund's `last_period`. /// - /// In this case, the fund's retirement flag is set and its `end` is reset to the current block - /// number. + /// In this case, the fund's retirement flag is set and its `end` is reset to the current + /// block number. /// /// - `who`: The account whose contribution should be withdrawn. /// - `index`: The parachain to whose crowdloan the contribution was made. @@ -653,8 +657,9 @@ pub mod pallet { Ok(()) } - /// Contribute your entire balance to a crowd sale. This will transfer the entire balance of a user over to fund a parachain - /// slot. It will be withdrawable when the crowdloan has ended and the funds are unused. + /// Contribute your entire balance to a crowd sale. This will transfer the entire balance of + /// a user over to fund a parachain slot. It will be withdrawable when the crowdloan has + /// ended and the funds are unused. #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::contribute())] pub fn contribute_all( @@ -719,8 +724,8 @@ impl Pallet { } /// This function checks all conditions which would qualify a crowdloan has ended. - /// * If we have reached the `fund.end` block OR the first lease period the fund is - /// trying to bid for has started already. + /// * If we have reached the `fund.end` block OR the first lease period the fund is trying to + /// bid for has started already. /// * And, if the fund has enough free funds to refund full raised amount. fn ensure_crowdloan_ended( now: BlockNumberFor, @@ -775,8 +780,8 @@ impl Pallet { Error::::BidOrLeaseActive ); - // We disallow any crowdloan contributions during the VRF Period, so that people do not sneak their - // contributions into the auction when it would not impact the outcome. + // We disallow any crowdloan contributions during the VRF Period, so that people do not + // sneak their contributions into the auction when it would not impact the outcome. ensure!(!T::Auctioneer::auction_status(now).is_vrf(), Error::::VrfDelayInProgress); let (old_balance, memo) = Self::contribution_get(fund.fund_index, &who); @@ -1287,7 +1292,8 @@ mod tests { ); // Cannot create a crowdloan with nonsense end date - // This crowdloan would end in lease period 2, but is bidding for some slot that starts in lease period 1. + // This crowdloan would end in lease period 2, but is bidding for some slot that starts + // in lease period 1. assert_noop!( Crowdloan::create(RuntimeOrigin::signed(1), para, 1000, 1, 4, 41, None), Error::::EndTooFarInFuture @@ -1457,7 +1463,8 @@ mod tests { let para_2 = new_para(); let index = NextFundIndex::::get(); assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_2, 1000, 1, 4, 40, None)); - // Emulate a win by leasing out and putting a deposit. Slots pallet would normally do this. + // Emulate a win by leasing out and putting a deposit. Slots pallet would normally do + // this. let crowdloan_account = Crowdloan::fund_account_id(index); set_winner(para_2, crowdloan_account, true); assert_noop!( @@ -1465,8 +1472,8 @@ mod tests { Error::::BidOrLeaseActive ); - // Move past lease period 1, should not be allowed to have further contributions with a crowdloan - // that has starting period 1. 
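// Hedged sketch of the "crowdloan has ended" check described above
// (simplified numeric types; the exact comparison against the first lease
// period and the retirement flag handling are simplified): the fund is over
// once its `end` block has passed or the first lease period it bid on has
// started, and it must hold enough free balance to refund everything raised.
struct Fund {
    end: u32,
    first_period: u32,
    raised: u128,
}

fn crowdloan_ended(now: u32, current_lease_period: u32, fund: &Fund, free_balance: u128) -> bool {
    let bidding_over = now >= fund.end || current_lease_period > fund.first_period;
    let can_refund = free_balance >= fund.raised;
    bidding_over && can_refund
}

fn main() {
    let fund = Fund { end: 200, first_period: 3, raised: 1_000 };
    assert!(!crowdloan_ended(150, 2, &fund, 1_000)); // still bidding
    assert!(crowdloan_ended(201, 2, &fund, 1_000)); // past `end`, fully refundable
    assert!(!crowdloan_ended(201, 2, &fund, 400)); // raised funds not free (e.g. leased out)
}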
+ // Move past lease period 1, should not be allowed to have further contributions with a + // crowdloan that has starting period 1. let para_3 = new_para(); assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_3, 1000, 1, 4, 40, None)); run_to_block(40); diff --git a/runtime/common/src/integration_tests.rs b/runtime/common/src/integration_tests.rs index fa21fbf9ef69..34a49bc230b6 100644 --- a/runtime/common/src/integration_tests.rs +++ b/runtime/common/src/integration_tests.rs @@ -471,7 +471,8 @@ fn basic_end_to_end_works() { ); assert_eq!( slots::Leases::::get(ParaId::from(para_2)), - // -- 1 --- 2 --- 3 --- 4 --- 5 ---------------- 6 --------------------------- 7 ---------------- + // -- 1 --- 2 --- 3 --- 4 --- 5 ---------------- 6 --------------------------- 7 + // ---------------- vec![ None, None, @@ -599,7 +600,8 @@ fn basic_errors_fail() { #[test] fn competing_slots() { - // This test will verify that competing slots, from different sources will resolve appropriately. + // This test will verify that competing slots, from different sources will resolve + // appropriately. new_test_ext().execute_with(|| { assert!(System::block_number().is_one()); let max_bids = 10u32; @@ -789,7 +791,8 @@ fn competing_bids() { let crowdloan_1 = Crowdloan::fund_account_id(fund_1.fund_index); assert_eq!( slots::Leases::::get(ParaId::from(2000)), - // -- 1 --- 2 --- 3 --- 4 --- 5 ------------- 6 ------------------------ 7 ------------- + // -- 1 --- 2 --- 3 --- 4 --- 5 ------------- 6 ------------------------ 7 + // ------------- vec![ None, None, diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index 550f443a5a78..57d9e21bcf53 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -107,9 +107,9 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The aggregated origin type must support the `parachains` origin. We require that we can - /// infallibly convert between this origin and the system origin, but in reality, they're the - /// same type, we just can't express that to the Rust type system without writing a `where` - /// clause everywhere. + /// infallibly convert between this origin and the system origin, but in reality, they're + /// the same type, we just can't express that to the Rust type system without writing a + /// `where` clause everywhere. type RuntimeOrigin: From<::RuntimeOrigin> + Into::RuntimeOrigin>>; @@ -163,14 +163,15 @@ pub mod pallet { CannotDowngrade, /// Cannot schedule upgrade of parathread to parachain CannotUpgrade, - /// Para is locked from manipulation by the manager. Must use parachain or relay chain governance. + /// Para is locked from manipulation by the manager. Must use parachain or relay chain + /// governance. ParaLocked, /// The ID given for registration has not been reserved. NotReserved, /// Registering parachain with empty code is not allowed. EmptyCode, - /// Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras are - /// correct for the swap to work. + /// Cannot perform a parachain slot / lifecycle swap. Check that the state of both paras + /// are correct for the swap to work. CannotSwap, } @@ -180,8 +181,8 @@ pub mod pallet { /// Amount held on deposit for each para and the original depositor. /// - /// The given account ID is responsible for registering the code and initial head data, but may only do - /// so if it isn't yet registered. (After that, it's up to governance to do so.) 
+ /// The given account ID is responsible for registering the code and initial head data, but may + /// only do so if it isn't yet registered. (After that, it's up to governance to do so.) #[pallet::storage] pub type Paras = StorageMap<_, Twox64Concat, ParaId, ParaInfo>>; @@ -224,8 +225,8 @@ pub mod pallet { /// - `validation_code`: The initial validation code of the parachain/thread. /// /// ## Deposits/Fees - /// The origin signed account must reserve a corresponding deposit for the registration. Anything already - /// reserved previously for this para ID is accounted for. + /// The origin signed account must reserve a corresponding deposit for the registration. + /// Anything already reserved previously for this para ID is accounted for. /// /// ## Events /// The `Registered` event is emitted in case of success. @@ -264,7 +265,8 @@ pub mod pallet { /// Deregister a Para Id, freeing all data and returning any deposit. /// - /// The caller must be Root, the `para` owner, or the `para` itself. The para must be a parathread. + /// The caller must be Root, the `para` owner, or the `para` itself. The para must be a + /// parathread. #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor, id: ParaId) -> DispatchResult { @@ -345,17 +347,20 @@ pub mod pallet { /// Reserve a Para Id on the relay chain. /// /// This function will reserve a new Para Id to be owned/managed by the origin account. - /// The origin account is able to register head data and validation code using `register` to create - /// a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot. + /// The origin account is able to register head data and validation code using `register` to + /// create a parathread. Using the Slots pallet, a parathread can then be upgraded to get a + /// parachain slot. /// /// ## Arguments - /// - `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new para ID. + /// - `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new + /// para ID. /// /// ## Deposits/Fees /// The origin must reserve a deposit of `ParaDeposit` for the registration. /// /// ## Events - /// The `Reserved` event is emitted in case of success, which provides the ID reserved for use. + /// The `Reserved` event is emitted in case of success, which provides the ID reserved for + /// use. #[pallet::call_index(5)] #[pallet::weight(::WeightInfo::reserve())] pub fn reserve(origin: OriginFor) -> DispatchResult { @@ -369,7 +374,8 @@ pub mod pallet { /// Add a manager lock from a para. This will prevent the manager of a /// para to deregister or swap a para. /// - /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + /// Can be called by Root, the parachain, or the parachain manager if the parachain is + /// unlocked. #[pallet::call_index(6)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn add_lock(origin: OriginFor, para: ParaId) -> DispatchResult { @@ -380,7 +386,8 @@ pub mod pallet { /// Schedule a parachain upgrade. /// - /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + /// Can be called by Root, the parachain, or the parachain manager if the parachain is + /// unlocked. #[pallet::call_index(7)] #[pallet::weight(::WeightInfo::schedule_code_upgrade(new_code.0.len() as u32))] pub fn schedule_code_upgrade( @@ -395,7 +402,8 @@ pub mod pallet { /// Set the parachain's current head. 
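// Hedged sketch of the reserve-then-register flow described above for the
// registrar (simplified in-memory state and string accounts, not the real
// pallet): `reserve` claims a new para ID for a manager, and `register` only
// succeeds for that manager while the ID is still merely reserved.
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Debug)]
enum ParaState {
    Reserved { manager: String },
    Registered { manager: String },
}

#[derive(Default)]
struct Registrar {
    next_id: u32,
    paras: HashMap<u32, ParaState>,
}

impl Registrar {
    fn reserve(&mut self, manager: &str) -> u32 {
        let id = self.next_id;
        self.next_id += 1;
        self.paras.insert(id, ParaState::Reserved { manager: manager.to_string() });
        id
    }

    fn register(&mut self, id: u32, who: &str) -> Result<(), &'static str> {
        match self.paras.get(&id) {
            None => return Err("NotReserved"),
            Some(ParaState::Registered { .. }) => return Err("AlreadyRegistered"),
            Some(ParaState::Reserved { manager }) if manager != who => return Err("NotOwner"),
            Some(ParaState::Reserved { .. }) => {},
        }
        self.paras.insert(id, ParaState::Registered { manager: who.to_string() });
        Ok(())
    }
}

fn main() {
    let mut registrar = Registrar::default();
    let id = registrar.reserve("alice");
    assert_eq!(registrar.register(id, "bob"), Err("NotOwner"));
    assert_eq!(registrar.register(id, "alice"), Ok(()));
}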
/// - /// Can be called by Root, the parachain, or the parachain manager if the parachain is unlocked. + /// Can be called by Root, the parachain, or the parachain manager if the parachain is + /// unlocked. #[pallet::call_index(8)] #[pallet::weight(::WeightInfo::set_current_head(new_head.0.len() as u32))] pub fn set_current_head( diff --git a/runtime/common/src/paras_sudo_wrapper.rs b/runtime/common/src/paras_sudo_wrapper.rs index 8944e932e9ef..d18eb8650aaf 100644 --- a/runtime/common/src/paras_sudo_wrapper.rs +++ b/runtime/common/src/paras_sudo_wrapper.rs @@ -45,8 +45,8 @@ pub mod pallet { ParaDoesntExist, /// The specified parachain or parathread is already registered. ParaAlreadyExists, - /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a downward - /// message. + /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a + /// downward message. ExceedsMaxMessageSize, /// Could not schedule para cleanup. CouldntCleanup, @@ -127,8 +127,8 @@ pub mod pallet { /// Send a downward XCM to the given para. /// - /// The given parachain should exist and the payload should not exceed the preconfigured size - /// `config.max_downward_message_size`. + /// The given parachain should exist and the payload should not exceed the preconfigured + /// size `config.max_downward_message_size`. #[pallet::call_index(4)] #[pallet::weight((1_000, DispatchClass::Operational))] pub fn sudo_queue_downward_xcm( diff --git a/runtime/common/src/purchase.rs b/runtime/common/src/purchase.rs index 246511a5d3d8..72795a733ea9 100644 --- a/runtime/common/src/purchase.rs +++ b/runtime/common/src/purchase.rs @@ -82,7 +82,8 @@ pub struct AccountStatus { locked_balance: Balance, /// Their sr25519/ed25519 signature verifying they have signed our required statement. signature: Vec, - /// The percentage of VAT the purchaser is responsible for. This is already factored into account balance. + /// The percentage of VAT the purchaser is responsible for. This is already factored into + /// account balance. vat: Permill, } @@ -333,12 +334,14 @@ pub mod pallet { if !status.locked_balance.is_zero() { let unlock_block = UnlockBlock::::get(); - // We allow some configurable portion of the purchased locked DOTs to be unlocked for basic usage. + // We allow some configurable portion of the purchased locked DOTs to be + // unlocked for basic usage. let unlocked = (T::UnlockedProportion::get() * status.locked_balance) .min(T::MaxUnlocked::get()); let locked = status.locked_balance.saturating_sub(unlocked); - // We checked that this account has no existing vesting schedule. So this function should - // never fail, however if it does, not much we can do about it at this point. + // We checked that this account has no existing vesting schedule. So this + // function should never fail, however if it does, not much we can do about + // it at this point. let _ = T::VestingSchedule::add_vesting_schedule( // Apply vesting schedule to this user &who, @@ -351,7 +354,8 @@ pub mod pallet { ); } - // Setting the user account to `Completed` ends the purchase process for this user. + // Setting the user account to `Completed` ends the purchase process for this + // user. 
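// Hedged sketch of the unlock split in the purchase payout above (simplified
// arithmetic; `Permill` is modelled here as parts-per-million): a configurable
// proportion of the purchased locked DOTs is unlocked for basic usage, capped
// at `MaxUnlocked`, and the remainder goes into the vesting schedule.
fn split_locked(locked_balance: u128, unlocked_proportion_ppm: u128, max_unlocked: u128) -> (u128, u128) {
    let unlocked = (locked_balance * unlocked_proportion_ppm / 1_000_000).min(max_unlocked);
    let locked = locked_balance.saturating_sub(unlocked);
    (unlocked, locked)
}

fn main() {
    // Assumed configuration: 10% unlocked, capped at 50 units.
    let (unlocked, locked) = split_locked(1_000, 100_000, 50);
    assert_eq!((unlocked, locked), (50, 950)); // proportional 100 is capped to 50
}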
status.validity = AccountValidity::Completed; Self::deposit_event(Event::::PaymentComplete { who: who.clone(), @@ -645,17 +649,20 @@ mod tests { } fn alice_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice" + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881") } fn bob_signature() -> [u8; 64] { - // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Bob" + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Bob" hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889") } fn alice_signature_ed25519() -> [u8; 64] { - // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice" + // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold + // race lonely fit walk//Alice" hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e") } diff --git a/runtime/common/src/slots/mod.rs b/runtime/common/src/slots/mod.rs index 0be75fcba2b1..b4e136b1211c 100644 --- a/runtime/common/src/slots/mod.rs +++ b/runtime/common/src/slots/mod.rs @@ -14,12 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Parathread and parachains leasing system. Allows para IDs to be claimed, the code and data to be initialized and -//! parachain slots (i.e. continuous scheduling) to be leased. Also allows for parachains and parathreads to be -//! swapped. +//! Parathread and parachains leasing system. Allows para IDs to be claimed, the code and data to be +//! initialized and parachain slots (i.e. continuous scheduling) to be leased. Also allows for +//! parachains and parathreads to be swapped. //! -//! This doesn't handle the mechanics of determining which para ID actually ends up with a parachain lease. This -//! must handled by a separately, through the trait interface that this pallet provides or the root dispatchables. +//! This doesn't handle the mechanics of determining which para ID actually ends up with a parachain +//! lease. This must be handled separately, through the trait interface that this pallet provides +//! or the root dispatchables. pub mod migration; @@ -98,8 +99,8 @@ pub mod pallet { /// Amounts held on deposit for each (possibly future) leased parachain. /// - /// The actual amount locked on its behalf by any account at any time is the maximum of the second values - /// of the items in this list whose first value is the account. + /// The actual amount locked on its behalf by any account at any time is the maximum of the + /// second values of the items in this list whose first value is the account. /// /// The first item in the list is the amount locked for the current Lease Period. Following /// items are for the subsequent lease periods.
@@ -160,8 +161,8 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// Just a connect into the `lease_out` call, in case Root wants to force some lease to happen - /// independently of any other on-chain mechanism to use it. + /// Just a connect into the `lease_out` call, in case Root wants to force some lease to + /// happen independently of any other on-chain mechanism to use it. /// /// The dispatch origin for this call must match `T::ForceOrigin`. #[pallet::call_index(0)] @@ -268,8 +269,8 @@ impl Pallet { // deposit for the parachain. let now_held = Self::deposit_held(para, &ended_lease.0); - // If this is less than what we were holding for this leaser's now-ended lease, then - // unreserve it. + // If this is less than what we were holding for this leaser's now-ended lease, + // then unreserve it. if let Some(rebate) = ended_lease.1.checked_sub(&now_held) { T::Currency::unreserve(&ended_lease.0, rebate); } @@ -392,8 +393,8 @@ impl Leaser> for Pallet { } } - // Figure out whether we already have some funds of `leaser` held in reserve for `para_id`. - // If so, then we can deduct those from the amount that we need to reserve. + // Figure out whether we already have some funds of `leaser` held in reserve for + // `para_id`. If so, then we can deduct those from the amount that we need to reserve. let maybe_additional = amount.checked_sub(&Self::deposit_held(para, &leaser)); if let Some(ref additional) = maybe_additional { T::Currency::reserve(&leaser, *additional) @@ -403,7 +404,8 @@ impl Leaser> for Pallet { let reserved = maybe_additional.unwrap_or_default(); // Check if current lease period is same as period begin, and onboard them directly. - // This will allow us to support onboarding new parachains in the middle of a lease period. + // This will allow us to support onboarding new parachains in the middle of a lease + // period. if current_lease_period == period_begin { // Best effort. Not much we can do if this fails. let _ = T::Registrar::make_parachain(para); @@ -481,7 +483,8 @@ impl Leaser> for Pallet { None => return true, }; - // Get the leases, and check each item in the vec which is part of the range we are checking. + // Get the leases, and check each item in the vec which is part of the range we are + // checking. let leases = Leases::::get(para_id); for slot in offset..=offset + period_count { if let Some(Some(_)) = leases.get(slot) { diff --git a/runtime/common/src/traits.rs b/runtime/common/src/traits.rs index f24a5b977968..940c3dfa2fb3 100644 --- a/runtime/common/src/traits.rs +++ b/runtime/common/src/traits.rs @@ -113,11 +113,12 @@ pub trait Leaser { /// /// `leaser` shall have a total of `amount` balance reserved by the implementer of this trait. /// - /// Note: The implementer of the trait (the leasing system) is expected to do all reserve/unreserve calls. The - /// caller of this trait *SHOULD NOT* pre-reserve the deposit (though should ensure that it is reservable). + /// Note: The implementer of the trait (the leasing system) is expected to do all + /// reserve/unreserve calls. The caller of this trait *SHOULD NOT* pre-reserve the deposit + /// (though should ensure that it is reservable). /// - /// The lease will last from `period_begin` for `period_count` lease periods. It is undefined if the `para` - /// already has a slot leased during those periods. + /// The lease will last from `period_begin` for `period_count` lease periods. It is undefined if + /// the `para` already has a slot leased during those periods. 
/// /// Returns `Err` in the case of an error, and in which case nothing is changed. fn lease_out( @@ -128,8 +129,8 @@ pub trait Leaser { period_count: Self::LeasePeriod, ) -> Result<(), LeaseError>; - /// Return the amount of balance currently held in reserve on `leaser`'s account for leasing `para`. This won't - /// go down outside a lease period. + /// Return the amount of balance currently held in reserve on `leaser`'s account for leasing + /// `para`. This won't go down outside a lease period. fn deposit_held( para: ParaId, leaser: &Self::AccountId, @@ -147,7 +148,8 @@ pub trait Leaser { fn lease_period_index(block: BlockNumber) -> Option<(Self::LeasePeriod, bool)>; /// Returns true if the parachain already has a lease in any of lease periods in the inclusive - /// range `[first_period, last_period]`, intersected with the unbounded range [`current_lease_period`..] . + /// range `[first_period, last_period]`, intersected with the unbounded range + /// [`current_lease_period`..] . fn already_leased( para_id: ParaId, first_period: Self::LeasePeriod, @@ -169,7 +171,8 @@ pub enum AuctionStatus { /// will be `EndingPeriod(1, 5)`. EndingPeriod(BlockNumber, BlockNumber), /// We have completed the bidding process and are waiting for the VRF to return some acceptable - /// randomness to select the winner. The number represents how many blocks we have been waiting. + /// randomness to select the winner. The number represents how many blocks we have been + /// waiting. VrfDelay(BlockNumber), } @@ -224,9 +227,9 @@ pub trait Auctioneer { /// - `last_slot`: The last lease period index of the range to be bid on (inclusive). /// - `amount`: The total amount to be the bid for deposit over the range. /// - /// The account `Bidder` must have at least `amount` available as a free balance in `Currency`. The - /// implementation *MUST* remove or reserve `amount` funds from `bidder` and those funds should be returned - /// or freed once the bid is rejected or lease has ended. + /// The account `Bidder` must have at least `amount` available as a free balance in `Currency`. + /// The implementation *MUST* remove or reserve `amount` funds from `bidder` and those funds + /// should be returned or freed once the bid is rejected or lease has ended. fn place_bid( bidder: Self::AccountId, para: ParaId, diff --git a/runtime/kusama/src/xcm_config.rs b/runtime/kusama/src/xcm_config.rs index 59e32f2ca544..5725f54eddd5 100644 --- a/runtime/kusama/src/xcm_config.rs +++ b/runtime/kusama/src/xcm_config.rs @@ -63,8 +63,8 @@ parameter_types! { pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); } -/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to determine -/// the sovereign account controlled by a location. +/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to +/// determine the sovereign account controlled by a location. pub type SovereignAccountOf = ( // We can convert a child parachain using the standard `AccountId` conversion. ChildParachainConvertsVia, @@ -72,8 +72,8 @@ pub type SovereignAccountOf = ( AccountId32Aliases, ); -/// Our asset transactor. This is what allows us to interest with the runtime facilities from the point of -/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`. +/// Our asset transactor. This is what allows us to interact with the runtime facilities from the +/// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`.
/// /// Ours is only aware of the Balances pallet, which is mapped to `TokenLocation`. pub type LocalAssetTransactor = XcmCurrencyAdapter< @@ -360,8 +360,8 @@ parameter_types! { pub ReachableDest: Option = Some(Parachain(1000).into()); } -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, @@ -374,8 +374,8 @@ pub type StakingAdminToPlurality = /// Type to convert the Fellows origin to a Plurality `MultiLocation` value. pub type FellowsToPlurality = OriginToPluralityVoice; -/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain for a destination chain. +/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an +/// interior location of this chain for a destination chain. pub type LocalPalletOriginToLocation = ( // StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. StakingAdminToPlurality, @@ -386,16 +386,17 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We only allow the root, the council, fellows and the staking admin to send messages. - // This is basically safe to enable for everyone (safe the possibility of someone spamming the parachain - // if they're willing to pay the KSM to send from the Relay-chain), but it's useless until we bring in XCM v3 - // which will make `DescendOrigin` a bit more useful. + // This is basically safe to enable for everyone (save the possibility of someone spamming the + // parachain if they're willing to pay the KSM to send from the Relay-chain), but it's useless + // until we bring in XCM v3 which will make `DescendOrigin` a bit more useful. type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally. type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = xcm_executor::XcmExecutor; - // Anyone is able to use teleportation regardless of who they are and what they want to teleport. + // Anyone is able to use teleportation regardless of who they are and what they want to + // teleport. type XcmTeleportFilter = Everything; // Anyone is able to use reserve transfers regardless of who they are and what they want to // transfer.
@@ -450,7 +451,8 @@ fn karura_liquid_staking_xcm_has_sane_weight_upper_limt() { else { panic!("no Transact instruction found") }; - // should be pallet_utility.as_derivative { index: 0, call: pallet_staking::bond_extra { max_additional: 2490000000000 } } + // should be pallet_utility.as_derivative { index: 0, call: pallet_staking::bond_extra { + // max_additional: 2490000000000 } } let message_call = call.take_decoded().expect("can't decode Transact call"); let call_weight = message_call.get_dispatch_info().weight; // Ensure that the Transact instruction is giving a sensible `require_weight_at_most` value diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs index e46c9f59b957..892e934e6dfc 100644 --- a/runtime/parachains/src/builder.rs +++ b/runtime/parachains/src/builder.rs @@ -174,7 +174,8 @@ impl BenchBuilder { configuration::Pallet::::config().max_validators.unwrap_or(200) } - /// Maximum number of validators participating in parachains consensus (a.k.a. active validators). + /// Maximum number of validators participating in parachains consensus (a.k.a. active + /// validators). fn max_validators(&self) -> u32 { self.max_validators.unwrap_or(Self::fallback_max_validators()) } @@ -186,8 +187,8 @@ impl BenchBuilder { self } - /// Maximum number of validators per core (a.k.a. max validators per group). This value is used if none is - /// explicitly set on the builder. + /// Maximum number of validators per core (a.k.a. max validators per group). This value is used + /// if none is explicitly set on the builder. pub(crate) fn fallback_max_validators_per_core() -> u32 { configuration::Pallet::::config().max_validators_per_core.unwrap_or(5) } @@ -479,7 +480,8 @@ impl BenchBuilder { /// Create backed candidates for `cores_with_backed_candidates`. You need these cores to be /// scheduled _within_ paras inherent, which requires marking the available bitfields as fully /// available. - /// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number of + /// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number + /// of /// validity votes. fn create_backed_candidates( &self, @@ -687,9 +689,9 @@ impl BenchBuilder { ); assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); - // Mark all the used cores as occupied. We expect that their are `backed_and_concluding_cores` - // that are pending availability and that there are `used_cores - backed_and_concluding_cores ` - // which are about to be disputed. + // Mark all the used cores as occupied. We expect that there are + // `backed_and_concluding_cores` that are pending availability and that there are + // `used_cores - backed_and_concluding_cores ` which are about to be disputed. scheduler::AvailabilityCores::::set(vec![ Some(CoreOccupied::Parachain); used_cores as usize diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 38a24211fb67..d4ad8619f16e 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -54,12 +54,12 @@ const LOG_TARGET: &str = "runtime::configuration"; serde::Deserialize, )] pub struct HostConfiguration { - // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct requires - // special treatment. + // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct + // requires special treatment.
// - // A parachain requested this struct can only depend on the subset of this struct. Specifically, - // only a first few fields can be depended upon. These fields cannot be changed without - // corresponding migration of the parachains. + // A parachain requested this struct can only depend on the subset of this struct. + // Specifically, only a first few fields can be depended upon. These fields cannot be changed + // without corresponding migration of the parachains. /** * The parameters that are required for the parachains. */ @@ -88,9 +88,9 @@ pub struct HostConfiguration { pub hrmp_max_message_num_per_candidate: u32, /// The minimum period, in blocks, between which parachains can update their validation code. /// - /// This number is used to prevent parachains from spamming the relay chain with validation code - /// upgrades. The only thing it controls is the number of blocks the `UpgradeRestrictionSignal` - /// is set for the parachain in question. + /// This number is used to prevent parachains from spamming the relay chain with validation + /// code upgrades. The only thing it controls is the number of blocks the + /// `UpgradeRestrictionSignal` is set for the parachain in question. /// /// If PVF pre-checking is enabled this should be greater than the maximum number of blocks /// PVF pre-checking can take. Intuitively, this number should be greater than the duration @@ -113,14 +113,15 @@ pub struct HostConfiguration { /// been completed. /// /// Note, there are situations in which `expected_at` in the past. For example, if - /// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set by - /// this field or if PVF pre-check took more time than the delay. In such cases, the upgrade is - /// further at the earliest possible time determined by [`minimum_validation_upgrade_delay`]. + /// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set + /// by this field or if PVF pre-check took more time than the delay. In such cases, the upgrade + /// is further at the earliest possible time determined by + /// [`minimum_validation_upgrade_delay`]. /// /// The rationale for this delay has to do with relay-chain reversions. In case there is an - /// invalid candidate produced with the new version of the code, then the relay-chain can revert - /// [`validation_upgrade_delay`] many blocks back and still find the new code in the storage by - /// hash. + /// invalid candidate produced with the new version of the code, then the relay-chain can + /// revert [`validation_upgrade_delay`] many blocks back and still find the new code in the + /// storage by hash. /// /// [#4601]: https://github.com/paritytech/polkadot/issues/4601 pub validation_upgrade_delay: BlockNumber, @@ -179,13 +180,13 @@ pub struct HostConfiguration { /// Must be non-zero. pub group_rotation_frequency: BlockNumber, /// The availability period, in blocks, for parachains. This is the amount of blocks - /// after inclusion that validators have to make the block available and signal its availability to - /// the chain. + /// after inclusion that validators have to make the block available and signal its + /// availability to the chain. /// /// Must be at least 1. pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads. Same as the `chain_availability_period`, - /// but a differing timeout due to differing requirements. + /// The availability period, in blocks, for parathreads. 
Same as the + /// `chain_availability_period`, but a differing timeout due to differing requirements. /// /// Must be at least 1. pub thread_availability_period: BlockNumber, @@ -217,8 +218,8 @@ pub struct HostConfiguration { pub needed_approvals: u32, /// The number of samples to do of the `RelayVRFModulo` approval assignment criterion. pub relay_vrf_modulo_samples: u32, - /// If an active PVF pre-checking vote observes this many number of sessions it gets automatically - /// rejected. + /// If an active PVF pre-checking vote observes this many number of sessions it gets + /// automatically rejected. /// /// 0 means PVF pre-checking will be rejected on the first observed session unless the voting /// gained supermajority before that the session change. @@ -849,7 +850,8 @@ pub mod pallet { }) } - /// Sets the maximum total size of items that can present in a upward dispatch queue at once. + /// Sets the maximum total size of items that can present in a upward dispatch queue at + /// once. #[pallet::call_index(24)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), @@ -1257,8 +1259,8 @@ impl Pallet { // 3. pending_configs = [(cur+1, X)] // There is a pending configuration scheduled and it will be applied in the next session. // - // We will use X as the base configuration. We need to schedule a new configuration change - // for the `scheduled_session` and use X as the base for the new configuration. + // We will use X as the base configuration. We need to schedule a new configuration + // change for the `scheduled_session` and use X as the base for the new configuration. // // 4. pending_configs = [(cur+1, X), (cur+2, Y)] // There is a pending configuration change in the next session and for the scheduled diff --git a/runtime/parachains/src/configuration/migration/v7.rs b/runtime/parachains/src/configuration/migration/v7.rs index cdff80a31a3a..78a7cf9e4dc0 100644 --- a/runtime/parachains/src/configuration/migration/v7.rs +++ b/runtime/parachains/src/configuration/migration/v7.rs @@ -182,10 +182,12 @@ mod tests { // Steps: // 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate // 2. Set these parameters: - // 2.1. selected state query: configuration; activeConfig(): PolkadotRuntimeParachainsConfigurationHostConfiguration - // 2.2. blockhash to query at: 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of the block) - // 2.3. Note the value of encoded storage key -> 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the referenced block. - // 2.4. You'll also need the decoded values to update the test. + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of the + // block) 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the referenced + // block. 2.4. You'll also need the decoded values to update the test. // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage // 3.1 Enter the encoded storage key and you get the raw config. @@ -196,8 +198,8 @@ mod tests { let v6 = V6HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); - // We check only a sample of the values here. If we missed any fields or messed up data types - // that would skew all the fields coming after. + // We check only a sample of the values here. 
If we missed any fields or messed up data + // types that would skew all the fields coming after. assert_eq!(v6.max_code_size, 3_145_728); assert_eq!(v6.validation_upgrade_cooldown, 200); assert_eq!(v6.max_pov_size, 5_242_880); @@ -209,8 +211,8 @@ mod tests { #[test] fn test_migrate_to_v7() { - // Host configuration has lots of fields. However, in this migration we only remove one field. - // The most important part to check are a couple of the last fields. We also pick + // Host configuration has lots of fields. However, in this migration we only remove one + // field. The most important part to check are a couple of the last fields. We also pick // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and // also their type. // @@ -291,7 +293,8 @@ mod tests { }); } - // Test that migration doesn't panic in case there're no pending configurations upgrades in pallet's storage. + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. #[test] fn test_migrate_to_v7_no_pending() { let v6 = V6HostConfiguration::::default(); diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs index 7b03cde8ed28..cf2e99e7359a 100644 --- a/runtime/parachains/src/disputes.rs +++ b/runtime/parachains/src/disputes.rs @@ -887,8 +887,8 @@ impl Pallet { #[allow(deprecated)] >::remove_prefix(to_prune, None); - // This is larger, and will be extracted to the `shared` pallet for more proper pruning. - // TODO: https://github.com/paritytech/polkadot/issues/3469 + // This is larger, and will be extracted to the `shared` pallet for more proper + // pruning. TODO: https://github.com/paritytech/polkadot/issues/3469 #[allow(deprecated)] >::remove_prefix(to_prune, None); } @@ -1178,7 +1178,8 @@ impl Pallet { >::insert(&session, &candidate_hash, &summary.state); - // Freeze if the INVALID votes against some local candidate are above the byzantine threshold + // Freeze if the INVALID votes against some local candidate are above the byzantine + // threshold if summary.new_flags.contains(DisputeStateFlags::AGAINST_BYZANTINE) { if let Some(revert_to) = >::get(&session, &candidate_hash) { Self::revert_and_freeze(revert_to); diff --git a/runtime/parachains/src/disputes/migration.rs b/runtime/parachains/src/disputes/migration.rs index af216fa0408e..ccd367e41b36 100644 --- a/runtime/parachains/src/disputes/migration.rs +++ b/runtime/parachains/src/disputes/migration.rs @@ -79,14 +79,16 @@ pub mod v1 { } } - /// Migrates the pallet storage to the most recent version, checking and setting the `StorageVersion`. + /// Migrates the pallet storage to the most recent version, checking and setting the + /// `StorageVersion`. 
pub fn migrate_to_v1() -> Weight { let mut weight: Weight = Weight::zero(); // SpamSlots should not contain too many keys so removing everything at once should be safe let res = SpamSlots::::clear(u32::MAX, None); // `loops` is the number of iterations => used to calculate read weights - // `backend` is the number of keys removed from the backend => used to calculate write weights + // `backend` is the number of keys removed from the backend => used to calculate write + // weights weight = weight .saturating_add(T::DbWeight::get().reads_writes(res.loops as u64, res.backend as u64)); diff --git a/runtime/parachains/src/disputes/tests.rs b/runtime/parachains/src/disputes/tests.rs index 93dcd58264b2..acdba343274c 100644 --- a/runtime/parachains/src/disputes/tests.rs +++ b/runtime/parachains/src/disputes/tests.rs @@ -871,7 +871,8 @@ mod unconfirmed_disputes { use assert_matches::assert_matches; use sp_runtime::ModuleError; - // Shared initialization code between `test_unconfirmed_are_ignored` and `test_unconfirmed_disputes_cause_block_import_error` + // Shared initialization code between `test_unconfirmed_are_ignored` and + // `test_unconfirmed_disputes_cause_block_import_error` fn generate_dispute_statement_set_and_run_to_block() -> DisputeStatementSet { // 7 validators needed for byzantine threshold of 2. let v0 = ::Pair::generate().0; @@ -2060,7 +2061,8 @@ fn deduplication_and_sorting_works() { ) .unwrap_err(); - // assert ordering of local only disputes, and at the same time, and being free of duplicates + // assert ordering of local only disputes, and at the same time, and being free of + // duplicates assert_eq!(disputes_orig.len(), disputes.len() + 1); let are_these_equal = |a: &DisputeStatementSet, b: &DisputeStatementSet| { diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index c876749e853d..1be2fe57b1df 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -117,12 +117,12 @@ pub struct HrmpOpenChannelRequest { #[derive(Encode, Decode, TypeInfo)] #[cfg_attr(test, derive(Debug))] pub struct HrmpChannel { - // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct requires - // special treatment. + // NOTE: This structure is used by parachains via merkle proofs. Therefore, this struct + // requires special treatment. // - // A parachain requested this struct can only depend on the subset of this struct. Specifically, - // only a first few fields can be depended upon (See `AbridgedHrmpChannel`). These fields cannot - // be changed without corresponding migration of parachains. + // A parachain requested this struct can only depend on the subset of this struct. + // Specifically, only a first few fields can be depended upon (See `AbridgedHrmpChannel`). + // These fields cannot be changed without corresponding migration of parachains. /// The maximum number of messages that can be pending in the channel at once. pub max_capacity: u32, /// The maximum total size of the messages that can be pending in the channel at once. @@ -370,7 +370,8 @@ pub mod pallet { /// The HRMP watermark associated with each para. /// Invariant: - /// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session. + /// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a + /// session. 
#[pallet::storage] pub type HrmpWatermarks = StorageMap<_, Twox64Concat, ParaId, BlockNumberFor>; @@ -968,9 +969,9 @@ impl Pallet { out_hrmp_msgs.iter().enumerate().map(|(idx, out_msg)| (idx as u32, out_msg)) { match last_recipient { - // the messages must be sorted in ascending order and there must be no two messages sent - // to the same recipient. Thus we can check that every recipient is strictly greater than - // the previous one. + // the messages must be sorted in ascending order and there must be no two messages + // sent to the same recipient. Thus we can check that every recipient is strictly + // greater than the previous one. Some(last_recipient) if out_msg.recipient <= last_recipient => return Err(OutboundHrmpAcceptanceErr::NotSorted { idx }), _ => last_recipient = Some(out_msg.recipient), diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index c71657d1ac43..f4ef3b95065e 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -17,8 +17,8 @@ //! The inclusion pallet is responsible for inclusion and availability of scheduled parachains //! and parathreads. //! -//! It is responsible for carrying candidates from being backable to being backed, and then from backed -//! to included. +//! It is responsible for carrying candidates from being backable to being backed, and then from +//! backed to included. use crate::{ configuration::{self, HostConfiguration}, @@ -76,8 +76,8 @@ impl WeightInfo for () { /// Maximum value that `config.max_upward_message_size` can be set to. /// -/// This is used for benchmarking sanely bounding relevant storage items. It is expected from the `configuration` -/// pallet to check these values before setting. +/// This is used for benchmarking sanely bounding relevant storage items. It is expected from the +/// `configuration` pallet to check these values before setting. pub const MAX_UPWARD_MESSAGE_SIZE_BOUND: u32 = 128 * 1024; /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding @@ -354,8 +354,8 @@ pub mod pallet { InvalidOutboundHrmp, /// The validation code hash of the candidate is not valid. InvalidValidationCodeHash, - /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual para head in the - /// commitments. + /// The `para_head` hash in the candidate descriptor doesn't match the hash of the actual + /// para head in the commitments. ParaHeadMismatch, /// A bitfield that references a freed core, /// either intentionally or as part of a concluded @@ -492,8 +492,8 @@ impl Pallet { /// /// Updates storage items `PendingAvailability` and `AvailabilityBitfields`. /// - /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became available, - /// and cores free. + /// Returns a `Vec` of `CandidateHash`es and their respective `AvailabilityCore`s that became + /// available, and cores free. pub(crate) fn update_pending_availability_and_get_freed_cores( expected_bits: usize, validators: &[ValidatorId], @@ -530,8 +530,8 @@ impl Pallet { continue }; - // defensive check - this is constructed by loading the availability bitfield record, - // which is always `Some` if the core is occupied - that's why we're here. + // defensive check - this is constructed by loading the availability bitfield + // record, which is always `Some` if the core is occupied - that's why we're here. 
let validator_index = validator_index.0 as usize; if let Some(mut bit) = pending_availability.as_mut().and_then(|candidate_pending_availability| { @@ -591,8 +591,8 @@ impl Pallet { freed_cores } - /// Process candidates that have been backed. Provide the relay storage root, a set of candidates - /// and scheduled cores. + /// Process candidates that have been backed. Provide the relay storage root, a set of + /// candidates and scheduled cores. /// /// Both should be sorted ascending by core index, and the candidates should be a subset of /// scheduled cores. If these conditions are not met, the execution of the function fails. @@ -968,7 +968,8 @@ impl Pallet { }) } // make sure that the queue is not overfilled. - // we do it here only once since returning false invalidates the whole relay-chain block. + // we do it here only once since returning false invalidates the whole relay-chain + // block. if para_queue_size.saturating_add(msg_size as u64) > config.max_upward_queue_size as u64 { return Err(UmpAcceptanceCheckErr::TotalSizeExceeded { diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index b9ecc3038ca2..e006c38e6dec 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -17,7 +17,8 @@ //! This module is responsible for maintaining a consistent initialization order for all other //! parachains modules. It's also responsible for finalization and session change notifications. //! -//! This module can throw fatal errors if session-change notifications are received after initialization. +//! This module can throw fatal errors if session-change notifications are received after +//! initialization. use crate::{ configuration::{self, HostConfiguration}, @@ -128,9 +129,9 @@ pub mod pallet { /// Semantically a `bool`, but this guarantees it should never hit the trie, /// as this is cleared in `on_finalize` and Frame optimizes `None` values to be empty values. /// - /// As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one of - /// them writes to the trie and one does not. This confusion makes `Option<()>` more suitable for - /// the semantics of this variable. + /// As a `bool`, `set(false)` and `remove()` both lead to the next `get()` being false, but one + /// of them writes to the trie and one does not. This confusion makes `Option<()>` more suitable + /// for the semantics of this variable. #[pallet::storage] pub(super) type HasInitialized = StorageValue<_, ()>; @@ -190,7 +191,8 @@ pub mod pallet { // Apply buffered session changes as the last thing. This way the runtime APIs and the // next block will observe the next session. // - // Note that we only apply the last session as all others lasted less than a block (weirdly). + // Note that we only apply the last session as all others lasted less than a block + // (weirdly). if let Some(BufferedSessionChange { session_index, validators, queued }) = BufferedSessionChanges::::take().pop() { diff --git a/runtime/parachains/src/origin.rs b/runtime/parachains/src/origin.rs index 14f8c3786c96..c83fec1b8923 100644 --- a/runtime/parachains/src/origin.rs +++ b/runtime/parachains/src/origin.rs @@ -38,7 +38,6 @@ where /// belongs to. /// /// This module fulfills only the single purpose of housing the `Origin` in `construct_runtime`. -/// // ideally, though, the `construct_runtime` should support a free-standing origin. 
#[frame_support::pallet] pub mod pallet { diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 98c5075a4c94..4570bb2b13bd 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -43,10 +43,10 @@ //! //! The conditions that must be met before the para can use the new validation code are: //! -//! 1. The validation code should have been "soaked" in the storage for a given number of blocks. That -//! is, the validation code should have been stored in on-chain storage for some time, so that in -//! case of a revert with a non-extreme height difference, that validation code can still be -//! found on-chain. +//! 1. The validation code should have been "soaked" in the storage for a given number of blocks. +//! That is, the validation code should have been stored in on-chain storage for some time, so +//! that in case of a revert with a non-extreme height difference, that validation code can still +//! be found on-chain. //! //! 2. The validation code was vetted by the validators and declared as non-malicious in a processes //! known as PVF pre-checking. @@ -105,7 +105,6 @@ //! start──────▶│reset│ //! └─────┘ //! ``` -//! use crate::{ configuration, @@ -152,8 +151,8 @@ pub struct ReplacementTimes { /// first parablock included with a relay-parent with number >= this value. expected_at: N, /// The relay-chain block number at which the parablock activating the code upgrade was - /// actually included. This means considered included and available, so this is the time at which - /// that parablock enters the acceptance period in this fork of the relay-chain. + /// actually included. This means considered included and available, so this is the time at + /// which that parablock enters the acceptance period in this fork of the relay-chain. activated_at: N, } @@ -332,7 +331,8 @@ impl<'de> Deserialize<'de> for ParaKind { } } -// Manual encoding, decoding, and TypeInfo as the parakind field in ParaGenesisArgs used to be a bool +// Manual encoding, decoding, and TypeInfo as the parakind field in ParaGenesisArgs used to be a +// bool impl Encode for ParaKind { fn size_hint(&self) -> usize { true.size_hint() @@ -373,12 +373,15 @@ pub(crate) enum PvfCheckCause { Onboarding(ParaId), /// PVF vote was initiated by signalling of an upgrade by the given para. Upgrade { - /// The ID of the parachain that initiated or is waiting for the conclusion of pre-checking. + /// The ID of the parachain that initiated or is waiting for the conclusion of + /// pre-checking. id: ParaId, - /// The relay-chain block number of **inclusion** of candidate that that initiated the upgrade. + /// The relay-chain block number of **inclusion** of candidate that that initiated the + /// upgrade. /// - /// It's important to count upgrade enactment delay from the inclusion of this candidate instead - /// of its relay parent -- in order to keep PVF available in case of chain reversions. + /// It's important to count upgrade enactment delay from the inclusion of this candidate + /// instead of its relay parent -- in order to keep PVF available in case of chain + /// reversions. /// /// See https://github.com/paritytech/polkadot/issues/4601 for detailed explanation. included_at: BlockNumber, @@ -681,11 +684,11 @@ pub mod pallet { pub(super) type PastCodeMeta = StorageMap<_, Twox64Concat, ParaId, ParaPastCodeMeta>, ValueQuery>; - /// Which paras have past code that needs pruning and the relay-chain block at which the code was replaced. 
- /// Note that this is the actual height of the included block, not the expected height at which the - /// code upgrade would be applied, although they may be equal. - /// This is to ensure the entire acceptance period is covered, not an offset acceptance period starting - /// from the time at which the parachain perceives a code upgrade as having occurred. + /// Which paras have past code that needs pruning and the relay-chain block at which the code + /// was replaced. Note that this is the actual height of the included block, not the expected + /// height at which the code upgrade would be applied, although they may be equal. + /// This is to ensure the entire acceptance period is covered, not an offset acceptance period + /// starting from the time at which the parachain perceives a code upgrade as having occurred. /// Multiple entries for a single para are permitted. Ordered ascending by block number. #[pallet::storage] pub(super) type PastCodePruning = @@ -706,12 +709,13 @@ pub mod pallet { pub(super) type FutureCodeHash = StorageMap<_, Twox64Concat, ParaId, ValidationCodeHash>; - /// This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade procedure. + /// This is used by the relay-chain to communicate to a parachain a go-ahead with in the upgrade + /// procedure. /// /// This value is absent when there are no upgrades scheduled or during the time the relay chain - /// performs the checks. It is set at the first relay-chain block when the corresponding parachain - /// can switch its upgrade function. As soon as the parachain's block is included, the value - /// gets reset to `None`. + /// performs the checks. It is set at the first relay-chain block when the corresponding + /// parachain can switch its upgrade function. As soon as the parachain's block is included, the + /// value gets reset to `None`. /// /// NOTE that this field is used by parachains via merkle storage proofs, therefore changing /// the format will require migration of parachains. @@ -896,8 +900,9 @@ pub mod pallet { /// Otherwise, the code will be added into the storage. Note that the code will be added /// into storage with reference count 0. This is to account the fact that there are no users /// for this code yet. The caller will have to make sure that this code eventually gets - /// used by some parachain or removed from the storage to avoid storage leaks. For the latter - /// prefer to use the `poke_unused_validation_code` dispatchable to raw storage manipulation. + /// used by some parachain or removed from the storage to avoid storage leaks. For the + /// latter prefer to use the `poke_unused_validation_code` dispatchable to raw storage + /// manipulation. /// /// This function is mainly meant to be used for upgrading parachains that do not follow /// the go-ahead signal while the PVF pre-checking feature is enabled. @@ -1569,10 +1574,11 @@ impl Pallet { match cause { PvfCheckCause::Onboarding(id) => { - // Here we need to undo everything that was done during `schedule_para_initialize`. - // Essentially, the logic is similar to offboarding, with exception that before - // actual onboarding the parachain did not have a chance to reach to upgrades. - // Therefore we can skip all the upgrade related storage items here. + // Here we need to undo everything that was done during + // `schedule_para_initialize`. Essentially, the logic is similar to offboarding, + // with exception that before actual onboarding the parachain did not have a + // chance to reach to upgrades. 
Therefore we can skip all the upgrade related + // storage items here. weight += T::DbWeight::get().writes(3); UpcomingParasGenesis::::remove(&id); CurrentCodeHash::::remove(&id); @@ -1629,8 +1635,8 @@ impl Pallet { // // - Doing it within the context of the PR that introduces this change is undesirable, since // it is already a big change, and that change would require a migration. Moreover, if we - // run the new version of the runtime, there will be less things to worry about during - // the eventual proper migration. + // run the new version of the runtime, there will be less things to worry about during the + // eventual proper migration. // // - This data type already is used for generating genesis, and changing it will probably // introduce some unnecessary burden. @@ -1641,8 +1647,8 @@ impl Pallet { // get rid of hashing of the validation code when onboarding. // // - Replace `validation_code` with a sentinel value: an empty vector. This should be fine - // as long we do not allow registering parachains with empty code. At the moment of writing - // this should already be the case. + // as long we do not allow registering parachains with empty code. At the moment of + // writing this should already be the case. // // - Empty value is treated as the current code is already inserted during the onboarding. // @@ -1670,7 +1676,8 @@ impl Pallet { /// /// Will return error if either is true: /// - /// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is `false`) + /// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is + /// `false`) /// - para has a pending upgrade. /// - para has unprocessed messages in its UMP queue. /// @@ -1683,7 +1690,8 @@ impl Pallet { // ongoing PVF pre-checking votes. It also removes some nasty edge cases. // // However, an upcoming upgrade on its own imposes no restrictions. An upgrade is enacted - // with a new para head, so if a para never progresses we still should be able to offboard it. + // with a new para head, so if a para never progresses we still should be able to offboard + // it. // // This implicitly assumes that the given para exists, i.e. it's lifecycle != None. if let Some(future_code_hash) = FutureCodeHash::::get(&id) { @@ -1768,13 +1776,14 @@ impl Pallet { /// the relay-chain block number will be determined at which the upgrade will take place. We /// call that block `expected_at`. /// - /// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation code - /// will be applied. Therefore, the new code will be used to validate the next candidate. + /// Once the candidate with the relay-parent >= `expected_at` is enacted, the new validation + /// code will be applied. Therefore, the new code will be used to validate the next candidate. /// /// The new code should not be equal to the current one, otherwise the upgrade will be aborted. /// If there is already a scheduled code upgrade for the para, this is a no-op. /// - /// Inclusion block number specifies relay parent which enacted candidate initiating the upgrade. + /// Inclusion block number specifies relay parent which enacted candidate initiating the + /// upgrade. pub(crate) fn schedule_code_upgrade( id: ParaId, new_code: ValidationCode, @@ -1905,8 +1914,8 @@ impl Pallet { // We increase the code RC here in any case. Intuitively the parachain that requested this // action is now a user of that PVF. 
// - // If the result of the pre-checking is reject, then we would decrease the RC for each cause, - // including the current. + // If the result of the pre-checking is reject, then we would decrease the RC for each + // cause, including the current. // // If the result of the pre-checking is accept, then we do nothing to the RC because the PVF // will continue be used by the same users. @@ -1918,9 +1927,9 @@ impl Pallet { weight } - /// Note that a para has progressed to a new head, where the new head was executed in the context - /// of a relay-chain block with given number. This will apply pending code upgrades based - /// on the relay-parent block number provided. + /// Note that a para has progressed to a new head, where the new head was executed in the + /// context of a relay-chain block with given number. This will apply pending code upgrades + /// based on the relay-parent block number provided. pub(crate) fn note_new_head( id: ParaId, new_head: HeadData, diff --git a/runtime/parachains/src/paras/tests.rs b/runtime/parachains/src/paras/tests.rs index 2bf30bb273e5..4a3be6d7d50e 100644 --- a/runtime/parachains/src/paras/tests.rs +++ b/runtime/parachains/src/paras/tests.rs @@ -649,7 +649,8 @@ fn submit_code_change_when_not_allowed_is_err() { Paras::schedule_code_upgrade(para_id, newer_code.clone(), 2, &Configuration::config()); assert_eq!( FutureCodeUpgrades::::get(¶_id), - Some(1 + validation_upgrade_delay), // did not change since the same assertion from the last time. + Some(1 + validation_upgrade_delay), /* did not change since the same assertion from + * the last time. */ ); assert_eq!(FutureCodeHash::::get(¶_id), Some(new_code.hash())); check_code_is_not_stored(&newer_code); @@ -1554,8 +1555,9 @@ fn increase_code_ref_doesnt_have_allergy_on_add_trusted_validation_code() { #[test] fn add_trusted_validation_code_insta_approval() { - // In particular, this tests that `kick_off_pvf_check` reacts to the `add_trusted_validation_code` - // and uses the `CodeByHash::contains_key` which is what `add_trusted_validation_code` uses. + // In particular, this tests that `kick_off_pvf_check` reacts to the + // `add_trusted_validation_code` and uses the `CodeByHash::contains_key` which is what + // `add_trusted_validation_code` uses. let para_id = 100.into(); let validation_code = ValidationCode(vec![1, 2, 3]); let validation_upgrade_delay = 25; diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 61be0d4adae8..a40a3422a669 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -285,8 +285,9 @@ pub mod pallet { } impl Pallet { - /// Create the `ParachainsInherentData` that gets passed to [`Self::enter`] in [`Self::create_inherent`]. - /// This code is pulled out of [`Self::create_inherent`] so it can be unit tested. + /// Create the `ParachainsInherentData` that gets passed to [`Self::enter`] in + /// [`Self::create_inherent`]. This code is pulled out of [`Self::create_inherent`] so it can be + /// unit tested. fn create_inherent_inner(data: &InherentData) -> Option>> { let parachains_inherent_data = match data.get_data(&Self::INHERENT_IDENTIFIER) { Ok(Some(d)) => d, @@ -313,11 +314,11 @@ impl Pallet { /// The given inherent data is processed and state is altered accordingly. If any data could /// not be applied (inconsitencies, weight limit, ...) it is removed. 
/// - /// When called from `create_inherent` the `context` must be set to `ProcessInherentDataContext::ProvideInherent` - /// so it guarantees the invariant that inherent is not overweight. - /// - /// It is **mandatory** that calls from `enter` set `context` to `ProcessInherentDataContext::Enter` to ensure - /// the weight invariant is checked. + /// When called from `create_inherent` the `context` must be set to + /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent + /// is not overweight. + /// It is **mandatory** that calls from `enter` set `context` to + /// `ProcessInherentDataContext::Enter` to ensure the weight invariant is checked. /// /// Returns: Result containing processed inherent data and weight, the processed inherent would /// consume. @@ -379,8 +380,8 @@ impl Pallet { let dispatch_class = DispatchClass::Mandatory; let max_block_weight_full = ::BlockWeights::get(); log::debug!(target: LOG_TARGET, "Max block weight: {}", max_block_weight_full.max_block); - // Get max block weight for the mandatory class if defined, otherwise total max weight of - // the block. + // Get max block weight for the mandatory class if defined, otherwise total max weight + // of the block. let max_weight = max_block_weight_full .per_class .get(dispatch_class) @@ -412,7 +413,8 @@ impl Pallet { T::DisputesHandler::filter_dispute_data(set, post_conclusion_acceptance_period) }; - // Limit the disputes first, since the following statements depend on the votes include here. + // Limit the disputes first, since the following statements depend on the votes include + // here. let (checked_disputes_sets, checked_disputes_sets_consumed_weight) = limit_and_sanitize_disputes::( disputes, @@ -449,8 +451,8 @@ impl Pallet { } all_weight_after } else { - // This check is performed in the context of block execution. Ensures inherent weight invariants guaranteed - // by `create_inherent_data` for block authorship. + // This check is performed in the context of block execution. Ensures inherent weight + // invariants guaranteed by `create_inherent_data` for block authorship. if all_weight_before.any_gt(max_block_weight) { log::error!( "Overweight para inherent data reached the runtime {:?}: {} > {}", @@ -714,13 +716,14 @@ fn random_sel Weight>( /// If there is sufficient space, all bitfields and all candidates /// will be included. /// -/// Otherwise tries to include all disputes, and then tries to fill the remaining space with bitfields and then candidates. +/// Otherwise tries to include all disputes, and then tries to fill the remaining space with +/// bitfields and then candidates. /// -/// The selection process is random. For candidates, there is an exception for code upgrades as they are preferred. -/// And for disputes, local and older disputes are preferred (see `limit_and_sanitize_disputes`). -/// for backed candidates, since with a increasing number of parachains their chances of -/// inclusion become slim. All backed candidates are checked beforehands in `fn create_inherent_inner` -/// which guarantees sanity. +/// The selection process is random. For candidates, there is an exception for code upgrades as they +/// are preferred. And for disputes, local and older disputes are preferred (see +/// `limit_and_sanitize_disputes`). for backed candidates, since with a increasing number of +/// parachains their chances of inclusion become slim. All backed candidates are checked +/// beforehands in `fn create_inherent_inner` which guarantees sanity. 
/// /// Assumes disputes are already filtered by the time this is called. /// @@ -977,7 +980,8 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { /// 1. If weight is exceeded by locals, pick the older ones (lower indices) /// until the weight limit is reached. /// -/// Returns the consumed weight amount, that is guaranteed to be less than the provided `max_consumable_weight`. +/// Returns the consumed weight amount, that is guaranteed to be less than the provided +/// `max_consumable_weight`. fn limit_and_sanitize_disputes< T: Config, CheckValidityFn: FnMut(DisputeStatementSet) -> Option, diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index c2e80e7525fb..faf52b555ba3 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -68,9 +68,9 @@ mod enter { } #[test] - // Validate that if we create 2 backed candidates which are assigned to 2 cores that will be freed via - // becoming fully available, the backed candidates will not be filtered out in `create_inherent` and - // will not cause `enter` to early. + // Validate that if we create 2 backed candidates which are assigned to 2 cores that will be + // freed via becoming fully available, the backed candidates will not be filtered out in + // `create_inherent` and will not cause `enter` to early. fn include_backed_candidates() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { let dispute_statements = BTreeMap::new(); @@ -252,7 +252,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 3 disputes => 3 cores, 15 validators) + // * 1 bitfield per validator (5 validators per core, 3 disputes => 3 cores, 15 + // validators) assert_eq!(expected_para_inherent_data.bitfields.len(), 15); // * 0 backed candidate per core assert_eq!(expected_para_inherent_data.backed_candidates.len(), 0); @@ -389,7 +390,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20) + // * 1 bitfield per validator (4 validators per core, 2 backed candidates, 3 disputes => + // 4*5 = 20) assert_eq!(expected_para_inherent_data.bitfields.len(), 20); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -408,7 +410,8 @@ mod enter { Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); assert!(limit_inherent_data != expected_para_inherent_data); - // Three disputes is over weight (see previous test), so we expect to only see 2 disputes + // Three disputes is over weight (see previous test), so we expect to only see 2 + // disputes assert_eq!(limit_inherent_data.disputes.len(), 2); // Ensure disputes are filtered as expected assert_eq!(limit_inherent_data.disputes[0].session, 1); @@ -418,7 +421,8 @@ mod enter { limit_inherent_data.bitfields.len(), expected_para_inherent_data.bitfields.len() ); - // Ensure that all backed candidates are filtered out as either would make the block over weight + // Ensure that all backed candidates are filtered out as either would make the block + // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 0); assert_ok!(Pallet::::enter( @@ -470,7 +474,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as 
expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 4*5 = 20), + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => + // 4*5 = 20), assert_eq!(expected_para_inherent_data.bitfields.len(), 25); // * 2 backed candidates, assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -493,14 +498,16 @@ mod enter { assert!(inherent_data_weight(&limit_inherent_data) .all_lte(max_block_weight_proof_size_adjusted())); - // Three disputes is over weight (see previous test), so we expect to only see 2 disputes + // Three disputes is over weight (see previous test), so we expect to only see 2 + // disputes assert_eq!(limit_inherent_data.disputes.len(), 2); // Ensure disputes are filtered as expected assert_eq!(limit_inherent_data.disputes[0].session, 1); assert_eq!(limit_inherent_data.disputes[1].session, 2); // Ensure all bitfields are included as these are still not over weight assert_eq!(limit_inherent_data.bitfields.len(), 20,); - // Ensure that all backed candidates are filtered out as either would make the block over weight + // Ensure that all backed candidates are filtered out as either would make the block + // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 0); assert_ok!(Pallet::::enter( @@ -551,7 +558,8 @@ mod enter { let expected_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => + // 5*5 = 25) assert_eq!(expected_para_inherent_data.bitfields.len(), 25); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -632,7 +640,8 @@ mod enter { .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25) + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => + // 5*5 = 25) assert_eq!(expected_para_inherent_data.bitfields.len(), 25); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -645,7 +654,8 @@ mod enter { let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -727,7 +737,8 @@ mod enter { .unwrap(); let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -792,7 +803,8 @@ mod enter { let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != 
expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -841,7 +853,8 @@ mod enter { .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes => 2*5 = 10) + // * 1 bitfield per validator (5 validators per core, 2 backed candidates, 0 disputes => + // 2*5 = 10) assert_eq!(expected_para_inherent_data.bitfields.len(), 10); // * 2 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 2); @@ -854,7 +867,8 @@ mod enter { let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); - // Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes + // Expect that inherent data is filtered to include only 1 backed candidate and 2 + // disputes assert!(limit_inherent_data != expected_para_inherent_data); assert!( max_block_weight_proof_size_adjusted() @@ -903,7 +917,8 @@ mod enter { .any_lt(inherent_data_weight(&expected_para_inherent_data))); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes => 5*33 = 165) + // * 1 bitfield per validator (5 validators per core, 30 backed candidates, 3 disputes + // => 5*33 = 165) assert_eq!(expected_para_inherent_data.bitfields.len(), 165); // * 30 backed candidates assert_eq!(expected_para_inherent_data.backed_candidates.len(), 30); diff --git a/runtime/parachains/src/runtime_api_impl/v5.rs b/runtime/parachains/src/runtime_api_impl/v5.rs index 1257c0c91702..4c9c8c911f62 100644 --- a/runtime/parachains/src/runtime_api_impl/v5.rs +++ b/runtime/parachains/src/runtime_api_impl/v5.rs @@ -393,7 +393,8 @@ pub fn pvfs_require_precheck() -> Vec { >::pvfs_require_precheck() } -/// Returns the validation code hash for the given parachain making the given `OccupiedCoreAssumption`. +/// Returns the validation code hash for the given parachain making the given +/// `OccupiedCoreAssumption`. pub fn validation_code_hash( para_id: ParaId, assumption: OccupiedCoreAssumption, diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index b69c16ae8d01..6882834187dc 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -21,19 +21,20 @@ //! - Scheduling parachains and parathreads //! //! It aims to achieve these tasks with these goals in mind: -//! - It should be possible to know at least a block ahead-of-time, ideally more, -//! which validators are going to be assigned to which parachains. -//! - Parachains that have a candidate pending availability in this fork of the chain -//! should not be assigned. +//! - It should be possible to know at least a block ahead-of-time, ideally more, which validators +//! are going to be assigned to which parachains. +//! - Parachains that have a candidate pending availability in this fork of the chain should not be +//! assigned. //! - Validator assignments should not be gameable. Malicious cartels should not be able to //! manipulate the scheduler to assign themselves as desired. -//! - High or close to optimal throughput of parachains and parathreads. Work among validator groups should be balanced. +//! - High or close to optimal throughput of parachains and parathreads. Work among validator groups +//! should be balanced. //! //! The Scheduler manages resource allocation using the concept of "Availability Cores". //! 
There will be one availability core for each parachain, and a fixed number of cores //! used for multiplexing parathreads. Validators will be partitioned into groups, with the same -//! number of groups as availability cores. Validator groups will be assigned to different availability cores -//! over time. +//! number of groups as availability cores. Validator groups will be assigned to different +//! availability cores over time. use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; @@ -169,8 +170,9 @@ pub mod pallet { /// broader set of Polkadot validators, but instead just the subset used for parachains during /// this session. /// - /// Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers. - /// Reasonably, 100-1000. The dominant factor is the number of validators: safe upper bound at 10k. + /// Bound: The number of cores is the sum of the numbers of parachains and parathread + /// multiplexers. Reasonably, 100-1000. The dominant factor is the number of validators: safe + /// upper bound at 10k. #[pallet::storage] #[pallet::getter(fn validator_groups)] pub(crate) type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; @@ -182,8 +184,8 @@ pub mod pallet { #[pallet::storage] pub(crate) type ParathreadQueue = StorageValue<_, ParathreadClaimQueue, ValueQuery>; - /// One entry for each availability core. Entries are `None` if the core is not currently occupied. Can be - /// temporarily `Some` if scheduled but not occupied. + /// One entry for each availability core. Entries are `None` if the core is not currently + /// occupied. Can be temporarily `Some` if scheduled but not occupied. /// The i'th parachain belongs to the i'th core, with the remaining cores all being /// parathread-multiplexers. /// @@ -197,11 +199,13 @@ pub mod pallet { /// An index used to ensure that only one claim on a parathread exists in the queue or is /// currently being handled by an occupied core. /// - /// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = 500. + /// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = + /// 500. #[pallet::storage] pub(crate) type ParathreadClaimIndex = StorageValue<_, Vec, ValueQuery>; - /// The block number where the session start occurred. Used to track how many group rotations have occurred. + /// The block number where the session start occurred. Used to track how many group rotations + /// have occurred. /// /// Note that in the context of parachains modules the session change is signaled during /// the block and enacted at the end of the block (at the finalization stage, to be exact). @@ -215,8 +219,8 @@ pub mod pallet { /// /// Bounded by the number of cores: one for each parachain and parathread multiplexer. /// - /// The value contained here will not be valid after the end of a block. Runtime APIs should be used to determine scheduled cores/ - /// for the upcoming block. + /// The value contained here will not be valid after the end of a block. Runtime APIs should be + /// used to determine scheduled cores/ for the upcoming block. #[pallet::storage] #[pallet::getter(fn scheduled)] pub(crate) type Scheduled = StorageValue<_, Vec, ValueQuery>; @@ -380,8 +384,9 @@ impl Pallet { }) } - /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along with the reason - /// for them being freed. The list is assumed to be sorted in ascending order by core index. + /// Free unassigned cores. 
Provide a list of cores that should be considered newly-freed along + /// with the reason for them being freed. The list is assumed to be sorted in ascending order by + /// core index. pub(crate) fn free_cores(just_freed_cores: impl IntoIterator) { let config = >::config(); @@ -403,8 +408,8 @@ impl Pallet { }) }, FreedReason::TimedOut => { - // If a parathread candidate times out, it's not the collator's fault, - // so we don't increment retries. + // If a parathread candidate times out, it's not the collator's + // fault, so we don't increment retries. ParathreadQueue::::mutate(|queue| { queue.enqueue_entry(entry, config.parathread_cores); }) @@ -417,9 +422,9 @@ impl Pallet { }) } - /// Schedule all unassigned cores, where possible. Provide a list of cores that should be considered - /// newly-freed along with the reason for them being freed. The list is assumed to be sorted in - /// ascending order by core index. + /// Schedule all unassigned cores, where possible. Provide a list of cores that should be + /// considered newly-freed along with the reason for them being freed. The list is assumed to be + /// sorted in ascending order by core index. pub(crate) fn schedule( just_freed_cores: impl IntoIterator, now: BlockNumberFor, @@ -455,10 +460,10 @@ impl Pallet { // check the first entry already scheduled with core index >= than the one we // are looking at. 3 cases: - // 1. No such entry, clearly this core is not scheduled, so we need to schedule and put at the end. - // 2. Entry exists and has same index as the core we are inspecting. do not schedule again. - // 3. Entry exists and has higher index than the core we are inspecting. schedule and note - // insertion position. + // 1. No such entry, clearly this core is not scheduled, so we need to schedule + // and put at the end. 2. Entry exists and has same index as the core we are + // inspecting. do not schedule again. 3. Entry exists and has higher index than + // the core we are inspecting. schedule and note insertion position. prev_scheduled_in_order.peek().map_or( Some(scheduled.len()), |(idx_in_scheduled, assign)| { @@ -509,8 +514,9 @@ impl Pallet { } } - // at this point, because `Scheduled` is guaranteed to be sorted and we navigated unassigned - // core indices in ascending order, we can enact the updates prepared by the previous actions. + // at this point, because `Scheduled` is guaranteed to be sorted and we navigated + // unassigned core indices in ascending order, we can enact the updates prepared by the + // previous actions. // // while inserting, we have to account for the amount of insertions already done. // @@ -522,20 +528,20 @@ impl Pallet { scheduled.insert(insert_at, to_insert); } - // scheduled is guaranteed to be sorted after this point because it was sorted before, and we - // applied sorted updates at their correct positions, accounting for the offsets of previous - // insertions. + // scheduled is guaranteed to be sorted after this point because it was sorted before, + // and we applied sorted updates at their correct positions, accounting for the offsets + // of previous insertions. } Scheduled::::set(scheduled); ParathreadQueue::::set(parathread_queue); } - /// Note that the given cores have become occupied. Behavior undefined if any of the given cores were not scheduled - /// or the slice is not sorted ascending by core index. + /// Note that the given cores have become occupied. Behavior undefined if any of the given cores + /// were not scheduled or the slice is not sorted ascending by core index. 
/// - /// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total cores. - /// This is efficient in the case that most scheduled cores are occupied. + /// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total + /// cores. This is efficient in the case that most scheduled cores are occupied. pub(crate) fn occupied(now_occupied: &[CoreIndex]) { if now_occupied.is_empty() { return @@ -568,8 +574,8 @@ impl Pallet { AvailabilityCores::::set(availability_cores); } - /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core indices - /// out of bounds will return `None`, as will indices of unassigned cores. + /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core + /// indices out of bounds will return `None`, as will indices of unassigned cores. pub(crate) fn core_para(core_index: CoreIndex) -> Option { let cores = AvailabilityCores::::get(); match cores.get(core_index.0 as usize).and_then(|c| c.as_ref()) { @@ -587,8 +593,9 @@ impl Pallet { ValidatorGroups::::get().get(group_index.0 as usize).map(|g| g.clone()) } - /// Get the group assigned to a specific core by index at the current block number. Result undefined if the core index is unknown - /// or the block number is less than the session start index. + /// Get the group assigned to a specific core by index at the current block number. Result + /// undefined if the core index is unknown or the block number is less than the session start + /// index. pub(crate) fn group_assigned_to_core( core: CoreIndex, at: BlockNumberFor, @@ -622,10 +629,11 @@ impl Pallet { /// Returns an optional predicate that should be used for timing out occupied cores. /// - /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and the - /// block number since which it has been occupied, and the respective parachain and parathread - /// timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)` - /// of the last rotation would this return `Some`, unless there are no rotations. + /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and + /// the block number since which it has been occupied, and the respective parachain and + /// parathread timeouts, i.e. only within `max(config.chain_availability_period, + /// config.thread_availability_period)` of the last rotation would this return `Some`, unless + /// there are no rotations. /// /// This really should not be a box, but is working around a compiler limitation filed here: /// https://github.com/rust-lang/rust/issues/73226 diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs index 2188bb15b2e5..c4830f4bf253 100644 --- a/runtime/parachains/src/scheduler/tests.rs +++ b/runtime/parachains/src/scheduler/tests.rs @@ -56,7 +56,8 @@ fn run_to_block( if let Some(notification) = new_session(b + 1) { let mut notification_with_session_index = notification; - // We will make every session change trigger an action queue. Normally this may require 2 or more session changes. + // We will make every session change trigger an action queue. Normally this may require + // 2 or more session changes. 
if notification_with_session_index.session_index == SessionIndex::default() { notification_with_session_index.session_index = ParasShared::scheduled_session(); } @@ -104,8 +105,9 @@ fn default_config() -> HostConfiguration { scheduling_lookahead: 2, parathread_retries: 1, // This field does not affect anything that scheduler does. However, `HostConfiguration` - // is still a subject to consistency test. It requires that `minimum_validation_upgrade_delay` - // is greater than `chain_availability_period` and `thread_availability_period`. + // is still a subject to consistency test. It requires that + // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and + // `thread_availability_period`. minimum_validation_upgrade_delay: 6, ..Default::default() } @@ -626,9 +628,9 @@ fn schedule_schedules_including_just_freed() { assert!(Scheduler::scheduled().is_empty()); } - // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core (4) - // and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` then - // will go for core `3`. + // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core + // (4) and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` + // then will go for core `3`. Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_d, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_e, collator.clone())); diff --git a/runtime/parachains/src/shared.rs b/runtime/parachains/src/shared.rs index 857e671f0ee4..6b50bcce4054 100644 --- a/runtime/parachains/src/shared.rs +++ b/runtime/parachains/src/shared.rs @@ -62,8 +62,8 @@ pub mod pallet { pub(super) type ActiveValidatorIndices = StorageValue<_, Vec, ValueQuery>; - /// The parachain attestation keys of the validators actively participating in parachain consensus. - /// This should be the same length as `ActiveValidatorIndices`. + /// The parachain attestation keys of the validators actively participating in parachain + /// consensus. This should be the same length as `ActiveValidatorIndices`. #[pallet::storage] #[pallet::getter(fn active_validator_keys)] pub(super) type ActiveValidatorKeys = StorageValue<_, Vec, ValueQuery>; diff --git a/runtime/parachains/src/util.rs b/runtime/parachains/src/util.rs index d5b339b679e3..aa07ef080055 100644 --- a/runtime/parachains/src/util.rs +++ b/runtime/parachains/src/util.rs @@ -48,7 +48,7 @@ pub fn make_persisted_validation_data( /// the order of the `active` vec, the second item will contain the rest, in the original order. /// /// ```ignore -/// split_active_subset(active, all).0 == take_active_subset(active, all) +/// split_active_subset(active, all).0 == take_active_subset(active, all) /// ``` pub fn split_active_subset(active: &[ValidatorIndex], all: &[T]) -> (Vec, Vec) { let active_set: BTreeSet<_> = active.iter().cloned().collect(); @@ -76,7 +76,7 @@ pub fn split_active_subset(active: &[ValidatorIndex], all: &[T]) -> (V /// Uses `split_active_subset` and concatenates the inactive to the active vec. 
/// /// ```ignore -/// split_active_subset(active, all)[0..active.len()]) == take_active_subset(active, all) +/// split_active_subset(active, all)[0..active.len()]) == take_active_subset(active, all) /// ``` pub fn take_active_subset_and_inactive(active: &[ValidatorIndex], all: &[T]) -> Vec { let (mut a, mut i) = split_active_subset(active, all); diff --git a/runtime/polkadot/src/governance/old.rs b/runtime/polkadot/src/governance/old.rs index f4c2655a784a..4c7b503472f2 100644 --- a/runtime/polkadot/src/governance/old.rs +++ b/runtime/polkadot/src/governance/old.rs @@ -45,7 +45,8 @@ impl pallet_democracy::Config for Runtime { pallet_collective::EnsureProportionAtLeast, frame_system::EnsureRoot, >; - /// A 60% super-majority can have the next scheduled referendum be a straight majority-carries vote. + /// A 60% super-majority can have the next scheduled referendum be a straight majority-carries + /// vote. type ExternalMajorityOrigin = EitherOfDiverse< pallet_collective::EnsureProportionAtLeast, frame_system::EnsureRoot, diff --git a/runtime/polkadot/src/xcm_config.rs b/runtime/polkadot/src/xcm_config.rs index 867253ea0346..faae2e1d2619 100644 --- a/runtime/polkadot/src/xcm_config.rs +++ b/runtime/polkadot/src/xcm_config.rs @@ -63,8 +63,8 @@ parameter_types! { pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); } -/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to determine -/// the sovereign account controlled by a location. +/// The canonical means of converting a `MultiLocation` into an `AccountId`, used when we want to +/// determine the sovereign account controlled by a location. pub type SovereignAccountOf = ( // We can convert a child parachain using the standard `AccountId` conversion. ChildParachainConvertsVia, @@ -72,8 +72,8 @@ pub type SovereignAccountOf = ( AccountId32Aliases, ); -/// Our asset transactor. This is what allows us to interact with the runtime assets from the point of -/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`. +/// Our asset transactor. This is what allows us to interact with the runtime assets from the point +/// of view of XCM-only concepts like `MultiLocation` and `MultiAsset`. /// /// Ours is only aware of the Balances pallet, which is mapped to `TokenLocation`. pub type LocalAssetTransactor = XcmCurrencyAdapter< @@ -369,8 +369,8 @@ pub type CouncilToPlurality = BackingToPlurality< CouncilBodyId, >; -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( CouncilToPlurality, // And a usual Signed origin to be used in XCM as a corresponding AccountId32 @@ -385,11 +385,11 @@ pub type StakingAdminToPlurality = pub type FellowshipAdminToPlurality = OriginToPluralityVoice; -/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain for a destination chain. +/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an +/// interior location of this chain for a destination chain. pub type LocalPalletOriginToLocation = ( - // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality of the - // `Unit` body. 
+ // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality + // of the `Unit` body. CouncilToPlurality, // StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. StakingAdminToPlurality, @@ -399,7 +399,8 @@ pub type LocalPalletOriginToLocation = ( impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - // We only allow the root, the council, the fellowship admin and the staking admin to send messages. + // We only allow the root, the council, the fellowship admin and the staking admin to send + // messages. type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally... diff --git a/runtime/rococo/src/xcm_config.rs b/runtime/rococo/src/xcm_config.rs index 714a4f69e759..75e06391c56b 100644 --- a/runtime/rococo/src/xcm_config.rs +++ b/runtime/rococo/src/xcm_config.rs @@ -56,8 +56,8 @@ parameter_types! { pub type LocationConverter = (ChildParachainConvertsVia, AccountId32Aliases); -/// Our asset transactor. This is what allows us to interest with the runtime facilities from the point of -/// view of XCM-only concepts like `MultiLocation` and `MultiAsset`. +/// Our asset transactor. This is what allows us to interest with the runtime facilities from the +/// point of view of XCM-only concepts like `MultiLocation` and `MultiAsset`. /// /// Ours is only aware of the Balances pallet, which is mapped to `RocLocation`. pub type LocalAssetTransactor = XcmCurrencyAdapter< @@ -342,11 +342,11 @@ pub type CouncilToPlurality = BackingToPlurality< CouncilBodyId, >; -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( - // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality of the - // `Unit` body. + // We allow an origin from the Collective pallet to be used in XCM as a corresponding Plurality + // of the `Unit` body. CouncilToPlurality, // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index c9f3aa6cb203..d7594e67c12a 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -355,8 +355,8 @@ impl pallet_staking::Config for Runtime { type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = onchain::OnChainExecution; - // Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The migration - // to bags-list is a no-op, but the storage version will be updated. + // Use the nominator map to iter voter AND no-ops for all SortedListProvider hooks. The + // migration to bags-list is a no-op, but the storage version will be updated. type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type NominationsQuota = pallet_staking::FixedNominationsQuota; diff --git a/runtime/test-runtime/src/xcm_config.rs b/runtime/test-runtime/src/xcm_config.rs index 21ce8c877dc3..2113bbae66ad 100644 --- a/runtime/test-runtime/src/xcm_config.rs +++ b/runtime/test-runtime/src/xcm_config.rs @@ -38,8 +38,8 @@ parameter_types! 
{ pub const UniversalLocation: xcm::latest::InteriorMultiLocation = xcm::latest::Junctions::Here; } -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 4b4659442cff..9bb5a6db613d 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -338,8 +338,8 @@ pub struct MaybeSignedPhase; impl Get for MaybeSignedPhase { fn get() -> u32 { - // 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test the fallback unsigned - // phase is able to compute elections on Westend. + // 1 day = 4 eras -> 1 week = 28 eras. We want to disable signed phase once a week to test + // the fallback unsigned phase is able to compute elections on Westend. if Staking::current_era().unwrap_or(1) % 28 == 0 { 0 } else { diff --git a/runtime/westend/src/xcm_config.rs b/runtime/westend/src/xcm_config.rs index d6a3feb3bc0f..a83c38c9f66f 100644 --- a/runtime/westend/src/xcm_config.rs +++ b/runtime/westend/src/xcm_config.rs @@ -271,8 +271,8 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior location -/// of this chain. +/// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior +/// location of this chain. pub type LocalOriginToLocation = ( // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32, diff --git a/rustfmt.toml b/rustfmt.toml index 542c561edd42..e2c4a037f37f 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,12 +1,20 @@ # Basic +edition = "2021" hard_tabs = true max_width = 100 use_small_heuristics = "Max" + # Imports imports_granularity = "Crate" reorder_imports = true + # Consistency newline_style = "Unix" + +# Format comments +comment_width = 100 +wrap_comments = true + # Misc chain_width = 80 spaces_around_ranges = false @@ -18,7 +26,3 @@ match_block_trailing_comma = true trailing_comma = "Vertical" trailing_semicolon = false use_field_init_shorthand = true -ignore = [ - "bridges", -] -edition = "2021" diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index b45c4c1be890..ea629f189dc8 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -114,4 +114,5 @@ cargo-clippy: - .docker-env - .test-refs script: + - cargo version && cargo clippy --version - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --locked --all-targets diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index fcd261b438b3..9aa445becce0 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -96,8 +96,8 @@ pub enum ValidityDoubleVote { } impl ValidityDoubleVote { - /// Deconstruct this misbehavior into two `(Statement, Signature)` pairs, erasing the information - /// about precisely what the problem was. + /// Deconstruct this misbehavior into two `(Statement, Signature)` pairs, erasing the + /// information about precisely what the problem was. 
pub fn deconstruct( self, ) -> ((Statement, Signature), (Statement, Signature)) @@ -124,8 +124,8 @@ pub enum DoubleSign { } impl DoubleSign { - /// Deconstruct this misbehavior into a statement with two signatures, erasing the information about - /// precisely where in the process the issue was detected. + /// Deconstruct this misbehavior into a statement with two signatures, erasing the information + /// about precisely where in the process the issue was detected. pub fn deconstruct(self) -> (Statement, Signature, Signature) { match self { Self::Seconded(candidate, a, b) => (Statement::Seconded(candidate), a, b), @@ -555,10 +555,11 @@ impl<'a, Ctx: Context> Iterator for DrainMisbehaviors<'a, Ctx> { type Item = (Ctx::AuthorityId, MisbehaviorFor); fn next(&mut self) -> Option { - // Note: this implementation will prematurely return `None` if `self.drain.next()` ever returns a - // tuple whose vector is empty. That will never currently happen, as the only modification - // to the backing map is currently via `drain` and `entry(...).or_default().push(...)`. - // However, future code changes might change that property. + // Note: this implementation will prematurely return `None` if `self.drain.next()` ever + // returns a tuple whose vector is empty. That will never currently happen, as the only + // modification to the backing map is currently via `drain` and + // `entry(...).or_default().push(...)`. However, future code changes might change that + // property. self.maybe_item().or_else(|| { self.in_progress = self.drain.next().map(Into::into); self.maybe_item() diff --git a/tests/common.rs b/tests/common.rs index 39b92732498f..940a0c6f18d0 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -76,7 +76,8 @@ async fn wait_n_finalized_blocks_from(n: usize, url: &str) { /// This is hack to get the actual binded sockaddr because /// polkadot assigns a random port if the specified port was already binded. /// -/// You must call `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())` +/// You must call +/// `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())` /// for this to work. pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) { let mut data = String::new(); diff --git a/utils/staking-miner/src/opts.rs b/utils/staking-miner/src/opts.rs index 819511b55b18..ecffe4531014 100644 --- a/utils/staking-miner/src/opts.rs +++ b/utils/staking-miner/src/opts.rs @@ -58,8 +58,8 @@ pub(crate) enum Command { #[derive(Debug, Clone, Parser)] #[cfg_attr(test, derive(PartialEq))] pub(crate) struct MonitorConfig { - /// The path to a file containing the seed of the account. If the file is not found, the seed is - /// used as-is. + /// The path to a file containing the seed of the account. If the file is not found, the seed + /// is used as-is. /// /// Can also be provided via the `SEED` environment variable. /// @@ -88,9 +88,11 @@ pub(crate) struct MonitorConfig { /// /// `--submission-strategy always`: always submit. /// - /// `--submission-strategy "percent-better "`: submit if the submission is `n` percent better. + /// `--submission-strategy "percent-better "`: submit if the submission is `n` percent + /// better. /// - /// `--submission-strategy "no-worse-than "`: submit if submission is no more than `n` percent worse. + /// `--submission-strategy "no-worse-than "`: submit if submission is no more than + /// `n` percent worse. 
#[clap(long, default_value = "if-leading")] pub submission_strategy: SubmissionStrategy, @@ -100,8 +102,8 @@ pub(crate) struct MonitorConfig { /// a delay can be enforced to avoid submitting at /// "same time" and risk potential races with other miners. /// - /// When this is enabled and there are competing solutions, your solution might not be submitted - /// if the scores are equal. + /// When this is enabled and there are competing solutions, your solution might not be + /// submitted if the scores are equal. #[arg(long, default_value_t = 0)] pub delay: usize, } @@ -109,8 +111,8 @@ pub(crate) struct MonitorConfig { #[derive(Debug, Clone, Parser)] #[cfg_attr(test, derive(PartialEq))] pub(crate) struct DryRunConfig { - /// The path to a file containing the seed of the account. If the file is not found, the seed is - /// used as-is. + /// The path to a file containing the seed of the account. If the file is not found, the seed + /// is used as-is. /// /// Can also be provided via the `SEED` environment variable. /// @@ -165,8 +167,8 @@ pub enum SubmissionStrategy { IfLeading, /// Submit if we are no worse than `Perbill` worse than the best. ClaimNoWorseThan(Perbill), - /// Submit if we are leading, or if the solution that's leading is more that the given `Perbill` - /// better than us. This helps detect obviously fake solutions and still combat them. + /// Submit if we are leading, or if the solution that's leading is more that the given + /// `Perbill` better than us. This helps detect obviously fake solutions and still combat them. ClaimBetterThan(Perbill), } @@ -189,8 +191,8 @@ pub(crate) enum Solver { /// * --submission-strategy if-leading: only submit if leading /// * --submission-strategy always: always submit /// * --submission-strategy "percent-better ": submit if submission is `n` percent better. -/// * --submission-strategy "no-worse-than": submit if submission is no more than `n` percent worse. -/// +/// * --submission-strategy "no-worse-than": submit if submission is no more than `n` +/// percent worse. impl FromStr for SubmissionStrategy { type Err = String; diff --git a/utils/staking-miner/src/rpc.rs b/utils/staking-miner/src/rpc.rs index a95e89191a49..2d25616e2a17 100644 --- a/utils/staking-miner/src/rpc.rs +++ b/utils/staking-miner/src/rpc.rs @@ -61,7 +61,8 @@ pub trait RpcApi { at: Option<&Hash>, ) -> RpcResult>; - /// Dry run an extrinsic at a given block. Return SCALE encoded [`sp_runtime::ApplyExtrinsicResult`]. + /// Dry run an extrinsic at a given block. Return SCALE encoded + /// [`sp_runtime::ApplyExtrinsicResult`]. #[method(name = "system_dryRun")] async fn dry_run(&self, extrinsic: &Bytes, at: Option) -> RpcResult; diff --git a/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index e5fce008a0f2..195066ee5b48 100644 --- a/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -52,7 +52,8 @@ pub mod pallet { /// If set to `Err`, benchmarks which rely on an `exchange_asset` will be skipped. fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError>; - /// A `(MultiLocation, Junction)` that is one of the `UniversalAliases` configured by the XCM executor. + /// A `(MultiLocation, Junction)` that is one of the `UniversalAliases` configured by the + /// XCM executor. /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. 
fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError>; @@ -75,13 +76,15 @@ pub mod pallet { /// Return an unlocker, owner and assets that can be locked and unlocked. fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError>; - /// A `(MultiLocation, NetworkId, InteriorMultiLocation)` we can successfully export message to. + /// A `(MultiLocation, NetworkId, InteriorMultiLocation)` we can successfully export message + /// to. /// /// If set to `Err`, benchmarks which rely on `export_message` will be skipped. fn export_message_origin_and_destination( ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError>; - /// A `(MultiLocation, MultiLocation)` that is one of the `Aliasers` configured by the XCM executor. + /// A `(MultiLocation, MultiLocation)` that is one of the `Aliasers` configured by the XCM + /// executor. /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError>; diff --git a/xcm/pallet-xcm/src/lib.rs b/xcm/pallet-xcm/src/lib.rs index d52d5ba24271..aefcf30910ed 100644 --- a/xcm/pallet-xcm/src/lib.rs +++ b/xcm/pallet-xcm/src/lib.rs @@ -195,9 +195,9 @@ pub mod pallet { /// The type used to actually dispatch an XCM to its destination. type XcmRouter: SendXcm; - /// Required origin for executing XCM messages, including the teleport functionality. If successful, - /// then it resolves to `MultiLocation` which exists as an interior location within this chain's XCM - /// context. + /// Required origin for executing XCM messages, including the teleport functionality. If + /// successful, then it resolves to `MultiLocation` which exists as an interior location + /// within this chain's XCM context. type ExecuteXcmOrigin: EnsureOrigin< ::RuntimeOrigin, Success = MultiLocation, @@ -212,7 +212,8 @@ pub mod pallet { /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; - /// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic must pass. + /// Our XCM filter which messages to be reserve-transferred using the dedicated extrinsic + /// must pass. type XcmReserveTransferFilter: Contains<(MultiLocation, Vec)>; /// Means of measuring the weight consumed by an XCM message locally. @@ -290,8 +291,8 @@ pub mod pallet { /// Query response has been received and query is removed. The registered notification has /// been dispatched and executed successfully. Notified { query_id: QueryId, pallet_index: u8, call_index: u8 }, - /// Query response has been received and query is removed. The registered notification could - /// not be dispatched because the dispatch weight is greater than the maximum weight + /// Query response has been received and query is removed. The registered notification + /// could not be dispatched because the dispatch weight is greater than the maximum weight /// originally budgeted by this runtime for the query result. NotifyOverweight { query_id: QueryId, @@ -371,7 +372,8 @@ pub mod pallet { cost: MultiAssets, message_id: XcmHash, }, - /// We have requested that a remote chain stops sending us XCM version change notifications. + /// We have requested that a remote chain stops sending us XCM version change + /// notifications. 
VersionNotifyUnrequested { destination: MultiLocation, cost: MultiAssets, @@ -402,8 +404,8 @@ pub mod pallet { /// The desired destination was unreachable, generally because there is a no way of routing /// to it. Unreachable, - /// There was some other issue (i.e. not to do with routing) in sending the message. Perhaps - /// a lack of space for buffering the message. + /// There was some other issue (i.e. not to do with routing) in sending the message. + /// Perhaps a lack of space for buffering the message. SendFailure, /// The message execution fails the filter. Filtered, @@ -791,12 +793,13 @@ pub mod pallet { /// with all fees taken as needed from the asset. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send - /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be - /// an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the - /// `dest` side. May not be empty. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to + /// pay the fee on the `dest` side. May not be empty. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. #[pallet::call_index(1)] @@ -839,12 +842,13 @@ pub mod pallet { /// with all fees taken as needed from the asset. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send - /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be - /// an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the - /// `dest` side. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the + /// fee on the `dest` side. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. #[pallet::call_index(2)] @@ -885,12 +889,12 @@ pub mod pallet { /// An event is deposited indicating whether `msg` could be executed completely or only /// partially. /// - /// No more than `max_weight` will be used in its attempted execution. If this is less than the - /// maximum amount of weight that the message could take to be executed, then no execution - /// attempt will be made. + /// No more than `max_weight` will be used in its attempted execution. 
If this is less than + /// the maximum amount of weight that the message could take to be executed, then no + /// execution attempt will be made. /// - /// NOTE: A successful return to this does *not* imply that the `msg` was executed successfully - /// to completion; only that *some* of it was executed. + /// NOTE: A successful return to this does *not* imply that the `msg` was executed + /// successfully to completion; only that *some* of it was executed. #[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -1012,12 +1016,13 @@ pub mod pallet { /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send - /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be - /// an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the - /// `dest` side. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. This should include the assets used to pay the + /// fee on the `dest` side. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. @@ -1063,12 +1068,13 @@ pub mod pallet { /// at risk. /// /// - `origin`: Must be capable of withdrawing the `assets` and executing XCM. - /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send - /// from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain. - /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be - /// an `AccountId32` value. - /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the - /// `dest` side. May not be empty. + /// - `dest`: Destination context for the assets. Will typically be `X2(Parent, + /// Parachain(..))` to send from parachain to parachain, or `X1(Parachain(..))` to send + /// from relay to parachain. + /// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will + /// generally be an `AccountId32` value. + /// - `assets`: The assets to be withdrawn. The first item should be the currency used to to + /// pay the fee on the `dest` side. May not be empty. /// - `fee_asset_item`: The index into `assets` of the item which should be used to pay /// fees. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. @@ -1561,13 +1567,13 @@ impl Pallet { /// /// - `message`: The message whose outcome should be reported. /// - `responder`: The origin from which a response should be expected. - /// - `notify`: A dispatchable function which will be called once the outcome of `message` - /// is known. 
It may be a dispatchable in any pallet of the local chain, but other than - /// the usual origin, it must accept exactly two arguments: `query_id: QueryId` and - /// `outcome: Response`, and in that order. It should expect that the origin is - /// `Origin::Response` and will contain the responder's location. - /// - `timeout`: The block number after which it is permissible for `notify` not to be - /// called even if a response is received. + /// - `notify`: A dispatchable function which will be called once the outcome of `message` is + /// known. It may be a dispatchable in any pallet of the local chain, but other than the usual + /// origin, it must accept exactly two arguments: `query_id: QueryId` and `outcome: Response`, + /// and in that order. It should expect that the origin is `Origin::Response` and will contain + /// the responder's location. + /// - `timeout`: The block number after which it is permissible for `notify` not to be called + /// even if a response is received. /// /// `report_outcome_notify` may return an error if the `responder` is not invertible. /// @@ -2090,8 +2096,8 @@ impl OnResponse for Pallet { call_index, }; Self::deposit_event(e); - // Not much to do with the result as it is. It's up to the parachain to ensure that the - // message makes sense. + // Not much to do with the result as it is. It's up to the + // parachain to ensure that the message makes sense. error_and_info.post_info.actual_weight }, } @@ -2159,8 +2165,8 @@ where } } -/// Filter for `MultiLocation` to find those which represent a strict majority approval of an identified -/// plurality. +/// Filter for `MultiLocation` to find those which represent a strict majority approval of an +/// identified plurality. /// /// May reasonably be used with `EnsureXcm`. pub struct IsMajorityOfBody(PhantomData<(Prefix, Body)>); @@ -2186,8 +2192,8 @@ impl, Body: Get> Contains } } -/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter the -/// `Origin::Xcm` item. +/// `EnsureOrigin` implementation succeeding with a `MultiLocation` value to recognize and filter +/// the `Origin::Xcm` item. pub struct EnsureXcm(PhantomData); impl, F: Contains> EnsureOrigin for EnsureXcm where diff --git a/xcm/src/double_encoded.rs b/xcm/src/double_encoded.rs index 2c8957d9ed76..c4c1276fad8d 100644 --- a/xcm/src/double_encoded.rs +++ b/xcm/src/double_encoded.rs @@ -73,7 +73,8 @@ impl DoubleEncoded { impl DoubleEncoded { /// Decode the inner encoded value and store it. - /// Returns a reference to the value in case of success and `Err(())` in case the decoding fails. + /// Returns a reference to the value in case of success and `Err(())` in case the decoding + /// fails. pub fn ensure_decoded(&mut self) -> Result<&T, ()> { if self.decoded.is_none() { self.decoded = @@ -92,8 +93,9 @@ impl DoubleEncoded { .ok_or(()) } - /// Provides an API similar to `TryInto` that allows fallible conversion to the inner value type. - /// `TryInto` implementation would collide with std blanket implementation based on `TryFrom`. + /// Provides an API similar to `TryInto` that allows fallible conversion to the inner value + /// type. `TryInto` implementation would collide with std blanket implementation based on + /// `TryFrom`. 
pub fn try_into(mut self) -> Result { self.ensure_decoded()?; self.decoded.ok_or(()) diff --git a/xcm/src/lib.rs b/xcm/src/lib.rs index 2e8ea78b5c15..a012c5f53fbf 100644 --- a/xcm/src/lib.rs +++ b/xcm/src/lib.rs @@ -360,7 +360,8 @@ impl TryFrom> for v3::Xcm { } } -/// Convert an `Xcm` datum into a `VersionedXcm`, based on a destination `MultiLocation` which will interpret it. +/// Convert an `Xcm` datum into a `VersionedXcm`, based on a destination `MultiLocation` which will +/// interpret it. pub trait WrapVersion { fn wrap_version( dest: &latest::MultiLocation, @@ -368,7 +369,8 @@ pub trait WrapVersion { ) -> Result, ()>; } -/// `()` implementation does nothing with the XCM, just sending with whatever version it was authored as. +/// `()` implementation does nothing with the XCM, just sending with whatever version it was +/// authored as. impl WrapVersion for () { fn wrap_version( _: &latest::MultiLocation, @@ -378,7 +380,8 @@ impl WrapVersion for () { } } -/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before wrapping it. +/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before +/// wrapping it. pub struct AlwaysV2; impl WrapVersion for AlwaysV2 { fn wrap_version( @@ -389,7 +392,8 @@ impl WrapVersion for AlwaysV2 { } } -/// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before wrapping it. +/// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before +/// wrapping it. pub struct AlwaysV3; impl WrapVersion for AlwaysV3 { fn wrap_version( diff --git a/xcm/src/v2/junction.rs b/xcm/src/v2/junction.rs index be075a31fe32..73a502999462 100644 --- a/xcm/src/v2/junction.rs +++ b/xcm/src/v2/junction.rs @@ -32,13 +32,13 @@ pub enum Junction { /// /// Generally used when the context is a Polkadot Relay-chain. Parachain(#[codec(compact)] u32), - /// A 32-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 32-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// Generally used when the context is a Substrate-based chain. AccountId32 { network: NetworkId, id: [u8; 32] }, - /// An 8-byte index for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// An 8-byte index for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is a Frame-based chain and includes e.g. an indices pallet. AccountIndex64 { @@ -46,8 +46,8 @@ pub enum Junction { #[codec(compact)] index: u64, }, - /// A 20-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 20-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is an Ethereum or Bitcoin chain or smart-contract. AccountKey20 { network: NetworkId, key: [u8; 20] }, @@ -73,8 +73,8 @@ pub enum Junction { OnlyChild, /// A pluralistic body existing within consensus. /// - /// Typical to be used to represent a governance origin of a chain, but could in principle be used to represent - /// things such as multisigs also. + /// Typical to be used to represent a governance origin of a chain, but could in principle be + /// used to represent things such as multisigs also. 
Plurality { id: BodyId, part: BodyPart }, } diff --git a/xcm/src/v2/mod.rs b/xcm/src/v2/mod.rs index 014942d6b679..79cc8ead89a1 100644 --- a/xcm/src/v2/mod.rs +++ b/xcm/src/v2/mod.rs @@ -39,11 +39,10 @@ //! - `Order` is now obsolete and replaced entirely by `Instruction`. //! - `Xcm` is now a simple wrapper around a `Vec`. //! - During conversion from `Order` to `Instruction`, we do not handle `BuyExecution`s that have -//! nested XCMs, i.e. if the `instructions` field in the `BuyExecution` enum struct variant is -//! not empty, then the conversion will fail. To address this, rewrite the XCM using -//! `Instruction`s in chronological order. -//! - During conversion from `Xcm` to `Instruction`, we do not handle `RelayedFrom` messages at -//! all. +//! nested XCMs, i.e. if the `instructions` field in the `BuyExecution` enum struct variant is not +//! empty, then the conversion will fail. To address this, rewrite the XCM using `Instruction`s in +//! chronological order. +//! - During conversion from `Xcm` to `Instruction`, we do not handle `RelayedFrom` messages at all. //! //! ### XCM Pallet //! - The `Weigher` configuration item must have sensible weights defined for `BuyExecution` and @@ -153,20 +152,20 @@ pub enum BodyId { Executive, /// The unambiguous technical body (for Polkadot, this would be the Technical Committee). Technical, - /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a majority of - /// lock-voters). + /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a + /// majority of lock-voters). Legislative, - /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a "grand oracle", it - /// may be considered as that). + /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a + /// "grand oracle", it may be considered as that). Judicial, - /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `staking_admin` track). + /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `staking_admin` track). Defense, - /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `general_admin` track). + /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a + /// public referendum on the `general_admin` track). Administration, - /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `treasurer` track). + /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `treasurer` track). Treasury, } @@ -422,8 +421,8 @@ pub type Weight = u64; /// /// All messages are delivered from a known *origin*, expressed as a `MultiLocation`. /// -/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the outer -/// XCM format, known as `VersionedXcm`. +/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the +/// outer XCM format, known as `VersionedXcm`. 
#[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] @@ -508,8 +507,8 @@ pub enum Instruction { /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` - /// instruction, which is sent onwards to `dest`. + /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which + /// is sent onwards to `dest`. /// /// Safety: No concerns. /// @@ -538,10 +537,11 @@ pub enum Instruction { call: DoubleEncoded, }, - /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by the - /// relay-chain to a para. + /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by + /// the relay-chain to a para. /// - /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel opening. + /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel + /// opening. /// - `max_message_size`: The maximum size of a message proposed by the sender. /// - `max_capacity`: The maximum number of messages that can be queued in the channel. /// @@ -558,8 +558,8 @@ pub enum Instruction { }, /// A message to notify about that a previously sent open channel request has been accepted by - /// the recipient. That means that the channel will be opened during the next relay-chain session - /// change. This message is meant to be sent by the relay-chain to a para. + /// the recipient. That means that the channel will be opened during the next relay-chain + /// session change. This message is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -573,10 +573,10 @@ pub enum Instruction { recipient: u32, }, - /// A message to notify that the other party in an open channel decided to close it. In particular, - /// `initiator` is going to close the channel opened from `sender` to the `recipient`. The close - /// will be enacted at the next relay-chain session change. This message is meant to be sent by - /// the relay-chain to a para. + /// A message to notify that the other party in an open channel decided to close it. In + /// particular, `initiator` is going to close the channel opened from `sender` to the + /// `recipient`. The close will be enacted at the next relay-chain session change. This message + /// is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -639,8 +639,8 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding. - /// Only the first `max_assets` assets/instances of those matched by `assets` will be removed, - /// prioritized under standard asset ordering. Any others will remain in holding. + /// Only the first `max_assets` assets/instances of those matched by `assets` will be + /// removed, prioritized under standard asset ordering. Any others will remain in holding. /// - `beneficiary`: The new owner for the assets. 
/// /// Kind: *Instruction* @@ -661,13 +661,13 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `max_assets`: The maximum number of unique assets/asset instances to remove from holding. - /// Only the first `max_assets` assets/instances of those matched by `assets` will be removed, - /// prioritized under standard asset ordering. Any others will remain in holding. + /// Only the first `max_assets` assets/instances of those matched by `assets` will be + /// removed, prioritized under standard asset ordering. Any others will remain in holding. /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction - /// which is sent onwards to `dest`. + /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is + /// sent onwards to `dest`. /// /// Kind: *Instruction* /// @@ -699,9 +699,9 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The - /// sovereign account of this consensus system *on the reserve location* will have appropriate - /// assets withdrawn and `effects` will be executed on them. There will typically be only one - /// valid location on any given asset/chain combination. + /// sovereign account of this consensus system *on the reserve location* will have + /// appropriate assets withdrawn and `effects` will be executed on them. There will typically + /// be only one valid location on any given asset/chain combination. /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. /// @@ -718,8 +718,8 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once arrived *on the destination /// location*. /// - /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for all - /// `assets`. If it does not, then the assets may be lost. + /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for + /// all `assets`. If it does not, then the assets may be lost. /// /// Kind: *Instruction* /// diff --git a/xcm/src/v2/multiasset.rs b/xcm/src/v2/multiasset.rs index aae65dcbb54a..fdd7797a1230 100644 --- a/xcm/src/v2/multiasset.rs +++ b/xcm/src/v2/multiasset.rs @@ -17,11 +17,14 @@ //! Cross-Consensus Message format asset data structures. //! //! This encompasses four types for representing assets: -//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some amount of a fungible. -//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with fungibles first. -//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific kind. -//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently filtering an XCM holding -//! account. +//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some +//! amount of a fungible. +//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with +//! fungibles first. +//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific +//! kind. +//! 
- `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently +//! filtering an XCM holding account. use super::MultiLocation; use crate::v3::{ @@ -42,8 +45,8 @@ pub enum AssetInstance { /// Undefined - used if the non-fungible asset class has only one instance. Undefined, - /// A compact index. Technically this could be greater than `u128`, but this implementation supports only - /// values up to `2**128 - 1`. + /// A compact index. Technically this could be greater than `u128`, but this implementation + /// supports only values up to `2**128 - 1`. Index(#[codec(compact)] u128), /// A 4-byte fixed-length datum. @@ -165,19 +168,21 @@ impl AssetId { Ok(()) } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `MultiAsset` value. + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `MultiAsset` value. pub fn into_multiasset(self, fun: Fungibility) -> MultiAsset { MultiAsset { fun, id: self } } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `WildMultiAsset` - /// wildcard (`AllOf`) value. + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `WildMultiAsset` wildcard (`AllOf`) value. pub fn into_wild(self, fun: WildFungibility) -> WildMultiAsset { WildMultiAsset::AllOf { fun, id: self } } } -/// Classification of whether an asset is fungible or not, along with a mandatory amount or instance. +/// Classification of whether an asset is fungible or not, along with a mandatory amount or +/// instance. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum Fungibility { @@ -300,7 +305,8 @@ impl TryFrom for MultiAsset { } } -/// A `Vec` of `MultiAsset`s. There may be no duplicate fungible items in here and when decoding, they must be sorted. +/// A `Vec` of `MultiAsset`s. There may be no duplicate fungible items in here and when decoding, +/// they must be sorted. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct MultiAssets(Vec); @@ -370,11 +376,12 @@ impl MultiAssets { Self(Vec::new()) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. If you can't - /// guarantee that `r` is sorted and deduplicated, then use `From::>::from` which is infallible. + /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. + /// If you can't guarantee that `r` is sorted and deduplicated, then use + /// `From::>::from` which is infallible. pub fn from_sorted_and_deduplicated(r: Vec) -> Result { if r.is_empty() { return Ok(Self(Vec::new())) @@ -389,20 +396,22 @@ impl MultiAssets { Ok(Self(r)) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. 
/// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. #[cfg(test)] pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped") } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. /// /// In test mode, this checks anyway and panics on fail. #[cfg(not(test))] @@ -410,7 +419,8 @@ impl MultiAssets { Self(r) } - /// Add some asset onto the list, saturating. This is quite a laborious operation since it maintains the ordering. + /// Add some asset onto the list, saturating. This is quite a laborious operation since it + /// maintains the ordering. pub fn push(&mut self, a: MultiAsset) { if let Fungibility::Fungible(ref amount) = a.fun { for asset in self.0.iter_mut().filter(|x| x.id == a.id) { @@ -489,19 +499,19 @@ impl TryFrom for WildFungibility { #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum WildMultiAsset { - /// All assets in the holding register, up to `usize` individual assets (different instances of non-fungibles could - /// be separate assets). + /// All assets in the holding register, up to `usize` individual assets (different instances of + /// non-fungibles could be separate assets). All, - /// All assets in the holding register of a given fungibility and ID. If operating on non-fungibles, then a limit - /// is provided for the maximum amount of matching instances. + /// All assets in the holding register of a given fungibility and ID. If operating on + /// non-fungibles, then a limit is provided for the maximum amount of matching instances. AllOf { id: AssetId, fun: WildFungibility }, } impl WildMultiAsset { /// Returns true if `self` is a super-set of the given `inner`. /// - /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any other non-wildcard. - /// For more details, see the implementation and tests. + /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any + /// other non-wildcard. For more details, see the implementation and tests. pub fn contains(&self, inner: &MultiAsset) -> bool { use WildMultiAsset::*; match self { @@ -565,8 +575,8 @@ impl From for MultiAssetFilter { impl MultiAssetFilter { /// Returns true if `self` is a super-set of the given `inner`. /// - /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any other non-wildcard. 
- /// For more details, see the implementation and tests. + /// Typically, any wildcard is never contained in anything else, and a wildcard can contain any + /// other non-wildcard. For more details, see the implementation and tests. pub fn contains(&self, inner: &MultiAsset) -> bool { match self { MultiAssetFilter::Definite(ref assets) => assets.contains(inner), diff --git a/xcm/src/v2/multilocation.rs b/xcm/src/v2/multilocation.rs index 086a83277322..9fb74e8afb35 100644 --- a/xcm/src/v2/multilocation.rs +++ b/xcm/src/v2/multilocation.rs @@ -174,8 +174,8 @@ impl MultiLocation { self.interior.push_front(new) } - /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with theoriginal value of - /// `self` in case of overflow. + /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with + /// the original value of `self` in case of overflow. pub fn pushed_with_interior(self, new: Junction) -> result::Result { match self.interior.pushed_with(new) { Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }), @@ -183,8 +183,8 @@ impl MultiLocation { } } - /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the original value of - /// `self` in case of overflow. + /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the + /// original value of `self` in case of overflow. pub fn pushed_front_with_interior( self, new: Junction, ) -> result::Result { @@ -430,7 +430,8 @@ impl From for MultiLocation { } } -/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner interior. +/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner +/// interior. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct ParentThen(pub Junctions); impl From for MultiLocation { @@ -448,7 +449,8 @@ impl From for MultiLocation { } } -/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the inner interior. +/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the +/// inner interior. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct AncestorThen(pub u8, pub Interior); impl> From> for MultiLocation { @@ -598,8 +600,8 @@ impl Junctions { } } - /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the first element - /// (second item in tuple) or `None` if it was empty. + /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the + /// first element (second item in tuple) or `None` if it was empty. pub fn split_first(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -614,8 +616,8 @@ impl Junctions { } } - /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the last element - /// (second item in tuple) or `None` if it was empty. + /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the + /// last element (second item in tuple) or `None` if it was empty. pub fn split_last(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -727,7 +729,8 @@ impl Junctions { } } - /// Returns the junction at index `i`, or `None` if the location doesn't contain that many elements. + /// Returns the junction at index `i`, or `None` if the location doesn't contain that many + /// elements.
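// A minimal, self-contained stand-in for the `split_first` contract documented above: split
// off the first element and return the remaining suffix together with that element, or `None`
// if the sequence was empty. The real `Junctions` type is a fixed-arity enum rather than a
// `Vec`; this sketch only mirrors the documented behaviour.
fn split_first_sketch<T>(mut items: Vec<T>) -> (Vec<T>, Option<T>) {
    if items.is_empty() {
        (items, None)
    } else {
        let first = items.remove(0);
        (items, Some(first))
    }
}
// Example: split_first_sketch(vec![1, 2, 3]) yields (vec![2, 3], Some(1)), while an empty
// input yields (vec![], None).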
pub fn at(&self, i: usize) -> Option<&Junction> { Some(match (i, self) { (0, Junctions::X1(ref a)) => a, @@ -770,8 +773,8 @@ impl Junctions { }) } - /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't contain that many - /// elements. + /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't + /// contain that many elements. pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { Some(match (i, self) { (0, Junctions::X1(ref mut a)) => a, diff --git a/xcm/src/v2/traits.rs b/xcm/src/v2/traits.rs index 524b659d57e1..ae03cf5547ba 100644 --- a/xcm/src/v2/traits.rs +++ b/xcm/src/v2/traits.rs @@ -81,7 +81,8 @@ pub enum Error { /// Used by `Transact` when the functor cannot be decoded. #[codec(index = 17)] FailedToDecode, - /// Used by `Transact` to indicate that the given weight limit could be breached by the functor. + /// Used by `Transact` to indicate that the given weight limit could be breached by the + /// functor. #[codec(index = 18)] MaxWeightInvalid, /// Used by `BuyExecution` when the Holding Register does not contain payable fees. @@ -94,7 +95,8 @@ pub enum Error { #[codec(index = 21)] Trap(u64), - // Errors that happen prior to instructions being executed. These fall outside of the XCM spec. + // Errors that happen prior to instructions being executed. These fall outside of the XCM + // spec. /// XCM version not able to be handled. UnhandledXcmVersion, /// Execution of the XCM would potentially result in a greater weight used than weight limit. @@ -161,7 +163,8 @@ pub type Result = result::Result<(), Error>; pub enum Outcome { /// Execution completed successfully; given weight was used. Complete(Weight), - /// Execution started, but did not complete successfully due to the given error; given weight was used. + /// Execution started, but did not complete successfully due to the given error; given weight + /// was used. Incomplete(Weight, Error), /// Execution did not start due to the given error. Error(Error), @@ -194,9 +197,9 @@ impl Outcome { /// Type of XCM message executor. pub trait ExecuteXcm { - /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. The weight limit is - /// a basic hard-limit and the implementation may place further restrictions or requirements on weight and - /// other aspects. + /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. The + /// weight limit is a basic hard-limit and the implementation may place further restrictions or + /// requirements on weight and other aspects. fn execute_xcm( origin: impl Into, message: Xcm, @@ -215,8 +218,8 @@ pub trait ExecuteXcm { /// Execute some XCM `message` from `origin` using no more than `weight_limit` weight. /// - /// Some amount of `weight_credit` may be provided which, depending on the implementation, may allow - /// execution without associated payment. + /// Some amount of `weight_credit` may be provided which, depending on the implementation, may + /// allow execution without associated payment. fn execute_xcm_in_credit( origin: impl Into, message: Xcm, @@ -263,9 +266,9 @@ pub type SendResult = result::Result<(), SendError>; /// Utility for sending an XCM message. /// -/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each router might return -/// `NotApplicable` to pass the execution to the next sender item. 
Note that each `NotApplicable` -/// might alter the destination and the XCM message for to the next router. +/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each +/// router might return `NotApplicable` to pass the execution to the next sender item. Note that +/// each `NotApplicable` might alter the destination and the XCM message for the next router. /// /// /// # Example @@ -330,9 +333,9 @@ pub type SendResult = result::Result<(), SendError>; pub trait SendXcm { /// Send an XCM `message` to a given `destination`. /// - /// If it is not a destination which can be reached with this type but possibly could by others, then it *MUST* - /// return `NotApplicable`. Any other error will cause the tuple implementation to exit early without - /// trying other type fields. + /// If it is not a destination which can be reached with this type but possibly could by others, + /// then it *MUST* return `NotApplicable`. Any other error will cause the tuple implementation + /// to exit early without trying other type fields. fn send_xcm(destination: impl Into, message: Xcm<()>) -> SendResult; } diff --git a/xcm/src/v3/junction.rs b/xcm/src/v3/junction.rs index 5fee8d1f83bd..ae66e2b33364 100644 --- a/xcm/src/v3/junction.rs +++ b/xcm/src/v3/junction.rs @@ -127,20 +127,20 @@ pub enum BodyId { Executive, /// The unambiguous technical body (for Polkadot, this would be the Technical Committee). Technical, - /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a majority of - /// lock-voters). + /// The unambiguous legislative body (for Polkadot, this could be considered the opinion of a + /// majority of lock-voters). Legislative, - /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a "grand oracle", it - /// may be considered as that). + /// The unambiguous judicial body (this doesn't exist on Polkadot, but if it were to get a + /// "grand oracle", it may be considered as that). Judicial, - /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `staking_admin` track). + /// The unambiguous defense body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `staking_admin` track). Defense, - /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `general_admin` track). + /// The unambiguous administration body (for Polkadot, an opinion on the topic given via a + /// public referendum on the `general_admin` track). Administration, - /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public referendum - /// on the `treasurer` track). + /// The unambiguous treasury body (for Polkadot, an opinion on the topic given via a public + /// referendum on the `treasurer` track). Treasury, } @@ -266,13 +266,13 @@ pub enum Junction { /// /// Generally used when the context is a Polkadot Relay-chain. Parachain(#[codec(compact)] u32), - /// A 32-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 32-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// Generally used when the context is a Substrate-based chain.
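// A simplified, hypothetical sketch of the tuple-routing rule documented for `SendXcm` earlier:
// each router either handles the message, fails hard, or returns `NotApplicable`, in which case
// the next router in the tuple is tried. All names here are illustrative stand-ins rather than
// the real crate API.
#[derive(Debug, PartialEq)]
enum SendErrorSketch {
    NotApplicable,
    Transport,
}

trait RouterSketch {
    fn send(dest: u32, message: &[u8]) -> Result<(), SendErrorSketch>;
}

// A router that only knows how to reach destination `1`.
struct OnlyDestinationOne;
impl RouterSketch for OnlyDestinationOne {
    fn send(dest: u32, _message: &[u8]) -> Result<(), SendErrorSketch> {
        if dest == 1 { Ok(()) } else { Err(SendErrorSketch::NotApplicable) }
    }
}

// Tuple semantics: try `A` first and fall through to `B` only on `NotApplicable`;
// any other error aborts immediately.
impl<A: RouterSketch, B: RouterSketch> RouterSketch for (A, B) {
    fn send(dest: u32, message: &[u8]) -> Result<(), SendErrorSketch> {
        match A::send(dest, message) {
            Err(SendErrorSketch::NotApplicable) => B::send(dest, message),
            other => other,
        }
    }
}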
AccountId32 { network: Option, id: [u8; 32] }, - /// An 8-byte index for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// An 8-byte index for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is a Frame-based chain and includes e.g. an indices pallet. AccountIndex64 { @@ -280,8 +280,8 @@ pub enum Junction { #[codec(compact)] index: u64, }, - /// A 20-byte identifier for an account of a specific network that is respected as a sovereign endpoint within - /// the context. + /// A 20-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. /// /// May be used when the context is an Ethereum or Bitcoin chain or smart-contract. AccountKey20 { network: Option, key: [u8; 20] }, @@ -310,8 +310,8 @@ pub enum Junction { OnlyChild, /// A pluralistic body existing within consensus. /// - /// Typical to be used to represent a governance origin of a chain, but could in principle be used to represent - /// things such as multisigs also. + /// Typical to be used to represent a governance origin of a chain, but could in principle be + /// used to represent things such as multisigs also. Plurality { id: BodyId, part: BodyPart }, /// A global network capable of externalizing its own consensus. This is not generally /// meaningful outside of the universal level. @@ -413,7 +413,8 @@ impl Junction { /// Convert `self` into a `MultiLocation` containing `n` parents. /// - /// Similar to `Self::into_location`, with the added ability to specify the number of parent junctions. + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. pub const fn into_exterior(self, n: u8) -> MultiLocation { MultiLocation { parents: n, interior: Junctions::X1(self) } } diff --git a/xcm/src/v3/junctions.rs b/xcm/src/v3/junctions.rs index da06cdbdad67..201a80fb7658 100644 --- a/xcm/src/v3/junctions.rs +++ b/xcm/src/v3/junctions.rs @@ -137,7 +137,8 @@ impl Junctions { /// Convert `self` into a `MultiLocation` containing `n` parents. /// - /// Similar to `Self::into_location`, with the added ability to specify the number of parent junctions. + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. pub const fn into_exterior(self, n: u8) -> MultiLocation { MultiLocation { parents: n, interior: self } } @@ -309,8 +310,8 @@ impl Junctions { } } - /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the first element - /// (second item in tuple) or `None` if it was empty. + /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the + /// first element (second item in tuple) or `None` if it was empty. pub fn split_first(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -325,8 +326,8 @@ impl Junctions { } } - /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the last element - /// (second item in tuple) or `None` if it was empty. + /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the + /// last element (second item in tuple) or `None` if it was empty. 
pub fn split_last(self) -> (Junctions, Option) { match self { Junctions::Here => (Junctions::Here, None), @@ -469,7 +470,8 @@ impl Junctions { } } - /// Returns the junction at index `i`, or `None` if the location doesn't contain that many elements. + /// Returns the junction at index `i`, or `None` if the location doesn't contain that many + /// elements. pub fn at(&self, i: usize) -> Option<&Junction> { Some(match (i, self) { (0, Junctions::X1(ref a)) => a, @@ -512,8 +514,8 @@ impl Junctions { }) } - /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't contain that many - /// elements. + /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't + /// contain that many elements. pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { Some(match (i, self) { (0, Junctions::X1(ref mut a)) => a, diff --git a/xcm/src/v3/mod.rs b/xcm/src/v3/mod.rs index 772ad48ac4b2..3614dc22550d 100644 --- a/xcm/src/v3/mod.rs +++ b/xcm/src/v3/mod.rs @@ -367,8 +367,8 @@ impl XcmContext { /// /// All messages are delivered from a known *origin*, expressed as a `MultiLocation`. /// -/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the outer -/// XCM format, known as `VersionedXcm`. +/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the +/// outer XCM format, known as `VersionedXcm`. #[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] @@ -417,9 +417,8 @@ pub enum Instruction { /// - `response`: The message content. /// - `max_weight`: The maximum weight that handling this response should take. /// - `querier`: The location responsible for the initiation of the response, if there is one. - /// In general this will tend to be the same location as the receiver of this message. - /// NOTE: As usual, this is interpreted from the perspective of the receiving consensus - /// system. + /// In general this will tend to be the same location as the receiver of this message. NOTE: + /// As usual, this is interpreted from the perspective of the receiving consensus system. /// /// Safety: Since this is information only, there are no immediate concerns. However, it should /// be remembered that even if the Origin behaves reasonably, it can always be asked to make @@ -460,8 +459,8 @@ pub enum Instruction { /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` - /// instruction, which is sent onwards to `dest`. + /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which + /// is sent onwards to `dest`. /// /// Safety: No concerns. /// @@ -487,10 +486,11 @@ pub enum Instruction { /// Errors: Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded }, - /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by the - /// relay-chain to a para. + /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by + /// the relay-chain to a para. /// - /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel opening. 
+ /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel + /// opening. /// - `max_message_size`: The maximum size of a message proposed by the sender. /// - `max_capacity`: The maximum number of messages that can be queued in the channel. /// @@ -507,8 +507,8 @@ pub enum Instruction { }, /// A message to notify about that a previously sent open channel request has been accepted by - /// the recipient. That means that the channel will be opened during the next relay-chain session - /// change. This message is meant to be sent by the relay-chain to a para. + /// the recipient. That means that the channel will be opened during the next relay-chain + /// session change. This message is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -522,10 +522,10 @@ pub enum Instruction { recipient: u32, }, - /// A message to notify that the other party in an open channel decided to close it. In particular, - /// `initiator` is going to close the channel opened from `sender` to the `recipient`. The close - /// will be enacted at the next relay-chain session change. This message is meant to be sent by - /// the relay-chain to a para. + /// A message to notify that the other party in an open channel decided to close it. In + /// particular, `initiator` is going to close the channel opened from `sender` to the + /// `recipient`. The close will be enacted at the next relay-chain session change. This message + /// is meant to be sent by the relay-chain to a para. /// /// Safety: The message should originate directly from the relay-chain. /// @@ -593,8 +593,8 @@ pub enum Instruction { /// - `dest`: The location whose sovereign account will own the assets and thus the effective /// beneficiary for the assets and the notification target for the reserve asset deposit /// message. - /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction - /// which is sent onwards to `dest`. + /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is + /// sent onwards to `dest`. /// /// Kind: *Instruction* /// @@ -623,9 +623,9 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to remove from holding. /// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The - /// sovereign account of this consensus system *on the reserve location* will have appropriate - /// assets withdrawn and `effects` will be executed on them. There will typically be only one - /// valid location on any given asset/chain combination. + /// sovereign account of this consensus system *on the reserve location* will have + /// appropriate assets withdrawn and `effects` will be executed on them. There will typically + /// be only one valid location on any given asset/chain combination. /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. /// @@ -642,8 +642,8 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once arrived *on the destination /// location*. /// - /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for all - /// `assets`. If it does not, then the assets may be lost. + /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for + /// all `assets`. If it does not, then the assets may be lost. 
/// /// Kind: *Instruction* /// @@ -809,7 +809,8 @@ pub enum Instruction { /// Kind: *Instruction* /// /// Errors: - /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the parameter. + /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the + /// parameter. ExpectTransactStatus(MaybeErrorCode), /// Query the existence of a particular pallet type. @@ -830,11 +831,15 @@ pub enum Instruction { /// Ensure that a particular pallet with a particular version exists. /// - /// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at this index. + /// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at + /// this index. /// - `name: Vec`: Name which must be equal to the name of the pallet. - /// - `module_name: Vec`: Module name which must be equal to the name of the module in which the pallet exists. - /// - `crate_major: Compact`: Version number which must be equal to the major version of the crate which implements the pallet. - /// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the crate which implements the pallet. + /// - `module_name: Vec`: Module name which must be equal to the name of the module in + /// which the pallet exists. + /// - `crate_major: Compact`: Version number which must be equal to the major version of the + /// crate which implements the pallet. + /// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the + /// crate which implements the pallet. /// /// Safety: No concerns. /// @@ -961,8 +966,8 @@ pub enum Instruction { /// of course, if there is no record that the asset actually is locked. /// /// - `asset`: The asset(s) to be unlocked. - /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which - /// an `UnlockAsset` should be sent. + /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which an + /// `UnlockAsset` should be sent. /// /// Kind: *Instruction*. /// @@ -971,8 +976,8 @@ pub enum Instruction { /// Sets the Fees Mode Register. /// - /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions - /// are withdrawn as needed using the same mechanism as `WithdrawAssets`. + /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions are + /// withdrawn as needed using the same mechanism as `WithdrawAssets`. /// /// Kind: *Instruction*. /// diff --git a/xcm/src/v3/multiasset.rs b/xcm/src/v3/multiasset.rs index a4900a71539a..1668d1b870dc 100644 --- a/xcm/src/v3/multiasset.rs +++ b/xcm/src/v3/multiasset.rs @@ -17,11 +17,14 @@ //! Cross-Consensus Message format asset data structures. //! //! This encompasses four types for representing assets: -//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some amount of a fungible. -//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with fungibles first. -//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific kind. -//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently filtering an XCM holding -//! account. +//! - `MultiAsset`: A description of a single asset, either an instance of a non-fungible or some +//! amount of a fungible. +//! - `MultiAssets`: A collection of `MultiAsset`s. These are stored in a `Vec` and sorted with +//! 
fungibles first. +//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific +//! kind. +//! - `MultiAssetFilter`: A combination of `Wild` and `MultiAssets` designed for efficiently +//! filtering an XCM holding account. use super::{InteriorMultiLocation, MultiLocation}; use crate::v2::{ @@ -47,8 +50,8 @@ pub enum AssetInstance { /// Undefined - used if the non-fungible asset class has only one instance. Undefined, - /// A compact index. Technically this could be greater than `u128`, but this implementation supports only - /// values up to `2**128 - 1`. + /// A compact index. Technically this could be greater than `u128`, but this implementation + /// supports only values up to `2**128 - 1`. Index(#[codec(compact)] u128), /// A 4-byte fixed-length datum. @@ -234,7 +237,8 @@ impl TryFrom for u128 { } } -/// Classification of whether an asset is fungible or not, along with a mandatory amount or instance. +/// Classification of whether an asset is fungible or not, along with a mandatory amount or +/// instance. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Encode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum Fungibility { @@ -387,13 +391,14 @@ impl AssetId { Ok(()) } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `MultiAsset` value. + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `MultiAsset` value. pub fn into_multiasset(self, fun: Fungibility) -> MultiAsset { MultiAsset { fun, id: self } } - /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding `WildMultiAsset` - /// wildcard (`AllOf`) value. + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `WildMultiAsset` wildcard (`AllOf`) value. pub fn into_wild(self, fun: WildFungibility) -> WildMultiAsset { WildMultiAsset::AllOf { fun, id: self } } @@ -576,11 +581,12 @@ impl MultiAssets { Self(Vec::new()) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. If you can't - /// guarantee that `r` is sorted and deduplicated, then use `From::>::from` which is infallible. + /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. + /// If you can't guarantee that `r` is sorted and deduplicated, then use + /// `From::>::from` which is infallible. pub fn from_sorted_and_deduplicated(r: Vec) -> Result { if r.is_empty() { return Ok(Self(Vec::new())) @@ -595,20 +601,22 @@ impl MultiAssets { Ok(Self(r)) } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. 
Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. #[cfg(test)] pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped") } - /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted and - /// which contain no duplicates. + /// Create a new instance of `MultiAssets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. /// - /// In release mode, this skips any checks to ensure that `r` is correct, making it a negligible-cost operation. - /// Generally though you should avoid using it unless you have a strict proof that `r` is valid. + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. /// /// In test mode, this checks anyway and panics on fail. #[cfg(not(test))] @@ -616,7 +624,8 @@ impl MultiAssets { Self(r) } - /// Add some asset onto the list, saturating. This is quite a laborious operation since it maintains the ordering. + /// Add some asset onto the list, saturating. This is quite a laborious operation since it + /// maintains the ordering. pub fn push(&mut self, a: MultiAsset) { for asset in self.0.iter_mut().filter(|x| x.id == a.id) { match (&a.fun, &mut asset.fun) { diff --git a/xcm/src/v3/multilocation.rs b/xcm/src/v3/multilocation.rs index 09d547503f1c..07f829d014c0 100644 --- a/xcm/src/v3/multilocation.rs +++ b/xcm/src/v3/multilocation.rs @@ -198,8 +198,8 @@ impl MultiLocation { self.interior.push_front(new) } - /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with theoriginal value of - /// `self` in case of overflow. + /// Consumes `self` and returns a `MultiLocation` suffixed with `new`, or an `Err` with + /// the original value of `self` in case of overflow. pub fn pushed_with_interior( self, new: impl Into, ) -> result::Result { match self.interior.pushed_with(new) { Ok(i) => Ok(MultiLocation { interior: i, parents: self.parents }), @@ -210,8 +210,8 @@ impl MultiLocation { } } - /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the original value of - /// `self` in case of overflow. + /// Consumes `self` and returns a `MultiLocation` prefixed with `new`, or an `Err` with the + /// original value of `self` in case of overflow. pub fn pushed_front_with_interior( self, new: impl Into, ) -> result::Result { @@ -472,7 +472,8 @@ impl From for MultiLocation { } } -/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner interior. +/// A tuple struct which can be converted into a `MultiLocation` of `parents` value 1 with the inner +/// interior. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct ParentThen(pub Junctions); impl From for MultiLocation { @@ -490,7 +491,8 @@ impl From for MultiLocation { } } -/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the inner interior. +/// A unit struct which can be converted into a `MultiLocation` of the inner `parents` value and the +/// inner interior. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct AncestorThen(pub u8, pub Interior); impl> From> for MultiLocation { diff --git a/xcm/src/v3/traits.rs b/xcm/src/v3/traits.rs index 966fb724ed11..128be42c2a2b 100644 --- a/xcm/src/v3/traits.rs +++ b/xcm/src/v3/traits.rs @@ -86,7 +86,8 @@ pub enum Error { /// Used by `Transact` when the functor cannot be decoded.
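// A small, self-contained illustration of the "sorted and deduplicated" invariant that
// `from_sorted_and_deduplicated` documents above, using plain integers in place of
// `MultiAsset` values; the function name and types are hypothetical.
fn from_sorted_and_deduplicated_sketch(r: Vec<u32>) -> Result<Vec<u32>, ()> {
    // Each element must be strictly greater than its predecessor: an equal neighbour is a
    // duplicate and a smaller one breaks the sort order, so either case is rejected.
    if r.windows(2).all(|w| w[0] < w[1]) {
        Ok(r)
    } else {
        Err(())
    }
}
// Example: [1, 2, 3] is accepted, while [1, 1, 3] (duplicate) and [2, 1] (unsorted) are not.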
#[codec(index = 17)] FailedToDecode, - /// Used by `Transact` to indicate that the given weight limit could be breached by the functor. + /// Used by `Transact` to indicate that the given weight limit could be breached by the + /// functor. #[codec(index = 18)] MaxWeightInvalid, /// Used by `BuyExecution` when the Holding Register does not contain payable fees. @@ -138,7 +139,8 @@ pub enum Error { #[codec(index = 34)] NotDepositable, - // Errors that happen prior to instructions being executed. These fall outside of the XCM spec. + // Errors that happen prior to instructions being executed. These fall outside of the XCM + // spec. /// XCM version not able to be handled. UnhandledXcmVersion, /// Execution of the XCM would potentially result in a greater weight used than weight limit. @@ -263,7 +265,8 @@ impl From for Outcome { pub enum Outcome { /// Execution completed successfully; given weight was used. Complete(Weight), - /// Execution started, but did not complete successfully due to the given error; given weight was used. + /// Execution started, but did not complete successfully due to the given error; given weight + /// was used. Incomplete(Weight, Error), /// Execution did not start due to the given error. Error(Error), diff --git a/xcm/xcm-builder/src/asset_conversion.rs b/xcm/xcm-builder/src/asset_conversion.rs index 583231d792dd..2fe26e8cd1e3 100644 --- a/xcm/xcm-builder/src/asset_conversion.rs +++ b/xcm/xcm-builder/src/asset_conversion.rs @@ -22,9 +22,9 @@ use sp_std::{marker::PhantomData, prelude::*, result}; use xcm::latest::prelude::*; use xcm_executor::traits::{Error as MatchError, MatchesFungibles, MatchesNonFungibles}; -/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be `TryFrom/TryInto`) into -/// a `GeneralIndex` junction, prefixed by some `MultiLocation` value. The `MultiLocation` value will typically be a -/// `PalletInstance` junction. +/// Converter struct implementing `AssetIdConversion` converting a numeric asset ID (must be +/// `TryFrom/TryInto`) into a `GeneralIndex` junction, prefixed by some `MultiLocation` value. +/// The `MultiLocation` value will typically be a `PalletInstance` junction. pub struct AsPrefixedGeneralIndex( PhantomData<(Prefix, AssetId, ConvertAssetId)>, ); diff --git a/xcm/xcm-builder/src/currency_adapter.rs b/xcm/xcm-builder/src/currency_adapter.rs index 32db840858a9..4dbd4fe8bcd0 100644 --- a/xcm/xcm-builder/src/currency_adapter.rs +++ b/xcm/xcm-builder/src/currency_adapter.rs @@ -44,8 +44,8 @@ impl From for XcmError { } } -/// Simple adapter to use a currency as asset transactor. This type can be used as `type AssetTransactor` in -/// `xcm::Config`. +/// Simple adapter to use a currency as asset transactor. This type can be used as `type +/// AssetTransactor` in `xcm::Config`. /// /// # Example /// ``` diff --git a/xcm/xcm-builder/src/fungibles_adapter.rs b/xcm/xcm-builder/src/fungibles_adapter.rs index bcb0e9c870b3..d7fded01e2db 100644 --- a/xcm/xcm-builder/src/fungibles_adapter.rs +++ b/xcm/xcm-builder/src/fungibles_adapter.rs @@ -63,8 +63,8 @@ impl< /// The location which is allowed to mint a particular asset. #[derive(Copy, Clone, Eq, PartialEq)] pub enum MintLocation { - /// This chain is allowed to mint the asset. When we track teleports of the asset we ensure that - /// no more of the asset returns back to the chain than has been sent out. + /// This chain is allowed to mint the asset. 
When we track teleports of the asset we ensure + /// that no more of the asset returns back to the chain than has been sent out. Local, /// This chain is not allowed to mint the asset. When we track teleports of the asset we ensure /// that no more of the asset is sent out from the chain than has been previously received. diff --git a/xcm/xcm-builder/src/location_conversion.rs b/xcm/xcm-builder/src/location_conversion.rs index ccc3cc040e61..26b48fc88adc 100644 --- a/xcm/xcm-builder/src/location_conversion.rs +++ b/xcm/xcm-builder/src/location_conversion.rs @@ -345,10 +345,11 @@ impl>, AccountId: From<[u8; 20]> + Into<[u8; 20]> } } -/// Converts a location which is a top-level relay chain (which provides its own consensus) into a 32-byte `AccountId`. +/// Converts a location which is a top-level relay chain (which provides its own consensus) into a +/// 32-byte `AccountId`. /// -/// This will always result in the *same account ID* being returned for the same Relay-chain, regardless of the relative security of -/// this Relay-chain compared to the local chain. +/// This will always result in the *same account ID* being returned for the same Relay-chain, +/// regardless of the relative security of this Relay-chain compared to the local chain. /// /// Note: No distinction is made between the cases when the given `UniversalLocation` lies within /// the same consensus system (i.e. is itself or a parent) and when it is a foreign consensus diff --git a/xcm/xcm-builder/src/origin_aliases.rs b/xcm/xcm-builder/src/origin_aliases.rs index 12bcdad3dfea..82c5f71b7a12 100644 --- a/xcm/xcm-builder/src/origin_aliases.rs +++ b/xcm/xcm-builder/src/origin_aliases.rs @@ -20,7 +20,8 @@ use frame_support::traits::{Contains, ContainsPair}; use sp_std::marker::PhantomData; use xcm::latest::prelude::*; -/// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches the `Prefix` pattern. +/// Alias a Foreign `AccountId32` with a local `AccountId32` if the foreign `AccountId32` matches +/// the `Prefix` pattern. /// /// Requires that the prefixed origin `AccountId32` matches the target `AccountId32`. pub struct AliasForeignAccountId32(PhantomData); diff --git a/xcm/xcm-builder/src/origin_conversion.rs b/xcm/xcm-builder/src/origin_conversion.rs index 0810b1ce2f8b..112b26869a99 100644 --- a/xcm/xcm-builder/src/origin_conversion.rs +++ b/xcm/xcm-builder/src/origin_conversion.rs @@ -24,7 +24,8 @@ use sp_std::marker::PhantomData; use xcm::latest::{BodyId, BodyPart, Junction, Junctions::*, MultiLocation, NetworkId, OriginKind}; use xcm_executor::traits::{ConvertLocation, ConvertOrigin}; -/// Sovereign accounts use the system's `Signed` origin with an account ID derived from the `LocationConverter`. +/// Sovereign accounts use the system's `Signed` origin with an account ID derived from the +/// `LocationConverter`. pub struct SovereignSignedViaLocation( PhantomData<(LocationConverter, RuntimeOrigin)>, ); @@ -269,10 +270,11 @@ where } } -/// `Convert` implementation to convert from some a `Signed` (system) `Origin` into an `AccountId32`. +/// `Convert` implementation to convert from a `Signed` (system) `Origin` into an +/// `AccountId32`. /// -/// Typically used when configuring `pallet-xcm` for allowing normal accounts to dispatch an XCM from an `AccountId32` -/// origin. +/// Typically used when configuring `pallet-xcm` for allowing normal accounts to dispatch an XCM +/// from an `AccountId32` origin.
pub struct SignedToAccountId32( PhantomData<(RuntimeOrigin, AccountId, Network)>, ); @@ -296,11 +298,11 @@ where } } -/// `Convert` implementation to convert from some an origin which implements `Backing` into a corresponding `Plurality` -/// `MultiLocation`. +/// `Convert` implementation to convert from an origin which implements `Backing` into a +/// corresponding `Plurality` `MultiLocation`. /// -/// Typically used when configuring `pallet-xcm` for allowing a collective's Origin to dispatch an XCM from a -/// `Plurality` origin. +/// Typically used when configuring `pallet-xcm` for allowing a collective's Origin to dispatch an +/// XCM from a `Plurality` origin. pub struct BackingToPlurality( PhantomData<(RuntimeOrigin, COrigin, Body)>, ); diff --git a/xcm/xcm-builder/src/tests/assets.rs b/xcm/xcm-builder/src/tests/assets.rs index 9b8ba0e459de..dbcb731a1bda 100644 --- a/xcm/xcm-builder/src/tests/assets.rs +++ b/xcm/xcm-builder/src/tests/assets.rs @@ -396,7 +396,8 @@ fn max_assets_limit_should_work() { ); assert_eq!(r, Outcome::Incomplete(Weight::from_parts(95, 95), XcmError::HoldingWouldOverflow)); - // Attempt to withdraw 4 different assets and then the same 4 and then a different 4 will succeed. + // Attempt to withdraw 4 different assets and then the same 4 and then a different 4 will + // succeed. let message = Xcm(vec![ WithdrawAsset(([1u8; 32], 100u128).into()), WithdrawAsset(([2u8; 32], 100u128).into()), diff --git a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs index 2f9bfcc2d80a..6870413c38d5 100644 --- a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs +++ b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs @@ -80,7 +80,8 @@ fn sending_to_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of balance). + // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); let entry = LogEntry { @@ -154,7 +155,8 @@ fn sending_to_parachain_of_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of balance). + // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); let entry = LogEntry { diff --git a/xcm/xcm-builder/src/tests/mock.rs b/xcm/xcm-builder/src/tests/mock.rs index 66a676369a67..aea780b84367 100644 --- a/xcm/xcm-builder/src/tests/mock.rs +++ b/xcm/xcm-builder/src/tests/mock.rs @@ -60,8 +60,8 @@ pub enum TestOrigin { /// A dummy call. /// -/// Each item contains the amount of weight that it *wants* to consume as the first item, and the actual amount (if -/// different from the former) in the second option. +/// Each item contains the amount of weight that it *wants* to consume as the first item, and the +/// actual amount (if different from the former) in the second option.
#[derive(Debug, Encode, Decode, Eq, PartialEq, Clone, Copy, scale_info::TypeInfo)] pub enum TestCall { OnlyRoot(Weight, Option), diff --git a/xcm/xcm-builder/src/tests/querying.rs b/xcm/xcm-builder/src/tests/querying.rs index be8edfe87b8d..8fbb55eb2542 100644 --- a/xcm/xcm-builder/src/tests/querying.rs +++ b/xcm/xcm-builder/src/tests/querying.rs @@ -95,7 +95,8 @@ fn pallet_query_with_results_should_work() { #[test] fn prepaid_result_of_query_should_get_free_execution() { let query_id = 33; - // We put this in manually here, but normally this would be done at the point of crafting the message. + // We put this in manually here, but normally this would be done at the point of crafting the + // message. expect_response(query_id, Parent.into()); let the_response = Response::Assets((Parent, 100u128).into()); diff --git a/xcm/xcm-builder/src/universal_exports.rs b/xcm/xcm-builder/src/universal_exports.rs index 9a65ec7dfe40..0ee627e0ee90 100644 --- a/xcm/xcm-builder/src/universal_exports.rs +++ b/xcm/xcm-builder/src/universal_exports.rs @@ -300,7 +300,8 @@ pub trait HaulBlob { #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HaulBlobError { - /// Represents point-to-point link failure with a human-readable explanation of the specific issue is provided. + /// Represents point-to-point link failure with a human-readable explanation of the specific + /// issue is provided. Transport(&'static str), } @@ -361,8 +362,9 @@ impl< message.try_into().map_err(|_| DispatchBlobError::UnsupportedXcmVersion)?; // Prepend our bridge instance discriminator. - // Can be used for fine-grained control of origin on destination in case of multiple bridge instances, - // e.g. restrict `type UniversalAliases` and `UniversalOrigin` instruction to trust just particular bridge instance for `NetworkId`. + // Can be used for fine-grained control of origin on destination in case of multiple bridge + // instances, e.g. restrict `type UniversalAliases` and `UniversalOrigin` instruction to + // trust just particular bridge instance for `NetworkId`. if let Some(bridge_instance) = OurPlaceBridgeInstance::get() { message.0.insert(0, DescendOrigin(bridge_instance)); } diff --git a/xcm/xcm-builder/src/weight.rs b/xcm/xcm-builder/src/weight.rs index 73cba6cb557b..f1c14a4c6517 100644 --- a/xcm/xcm-builder/src/weight.rs +++ b/xcm/xcm-builder/src/weight.rs @@ -114,8 +114,9 @@ where } } -/// Function trait for handling some revenue. Similar to a negative imbalance (credit) handler, but for a -/// `MultiAsset`. Sensible implementations will deposit the asset in some known treasury or block-author account. +/// Function trait for handling some revenue. Similar to a negative imbalance (credit) handler, but +/// for a `MultiAsset`. Sensible implementations will deposit the asset in some known treasury or +/// block-author account. pub trait TakeRevenue { /// Do something with the given `revenue`, which is a single non-wildcard `MultiAsset`. fn take_revenue(revenue: MultiAsset); diff --git a/xcm/xcm-builder/tests/scenarios.rs b/xcm/xcm-builder/tests/scenarios.rs index e587c4118e74..3e735720aa76 100644 --- a/xcm/xcm-builder/tests/scenarios.rs +++ b/xcm/xcm-builder/tests/scenarios.rs @@ -101,8 +101,8 @@ fn transfer_asset_works() { /// A parachain wants to be notified that a transfer worked correctly. /// It includes a `QueryHolding` order after the deposit to get notified on success. /// This somewhat abuses `QueryHolding` as an indication of execution success. 
It works because -/// order execution halts on error (so no `QueryResponse` will be sent if the previous order failed). -/// The inner response sent due to the query is not used. +/// order execution halts on error (so no `QueryResponse` will be sent if the previous order +/// failed). The inner response sent due to the query is not used. /// /// Asserts that the balances are updated correctly and the expected XCM is sent. #[test] diff --git a/xcm/xcm-executor/src/assets.rs b/xcm/xcm-executor/src/assets.rs index f5e0659931eb..d8d8936df331 100644 --- a/xcm/xcm-executor/src/assets.rs +++ b/xcm/xcm-executor/src/assets.rs @@ -132,15 +132,17 @@ impl Assets { /// Mutate `self` to contain all given `assets`, saturating if necessary. /// - /// NOTE: [`Assets`] are always sorted, allowing us to optimize this function from `O(n^2)` to `O(n)`. + /// NOTE: [`Assets`] are always sorted, allowing us to optimize this function from `O(n^2)` to + /// `O(n)`. pub fn subsume_assets(&mut self, mut assets: Assets) { let mut f_iter = assets.fungible.iter_mut(); let mut g_iter = self.fungible.iter_mut(); if let (Some(mut f), Some(mut g)) = (f_iter.next(), g_iter.next()) { loop { if f.0 == g.0 { - // keys are equal. in this case, we add `self`'s balance for the asset onto `assets`, balance, knowing - // that the `append` operation which follows will clobber `self`'s value and only use `assets`'s. + // keys are equal. in this case, we add `self`'s balance for the asset onto + // `assets`, balance, knowing that the `append` operation which follows will + // clobber `self`'s value and only use `assets`'s. (*f.1).saturating_accrue(*g.1); } if f.0 <= g.0 { @@ -186,8 +188,9 @@ impl Assets { /// Alter any concretely identified assets by prepending the given `MultiLocation`. /// - /// WARNING: For now we consider this infallible and swallow any errors. It is thus the caller's responsibility to - /// ensure that any internal asset IDs are able to be prepended without overflow. + /// WARNING: For now we consider this infallible and swallow any errors. It is thus the caller's + /// responsibility to ensure that any internal asset IDs are able to be prepended without + /// overflow. pub fn prepend_location(&mut self, prepend: &MultiLocation) { let mut fungible = Default::default(); mem::swap(&mut self.fungible, &mut fungible); @@ -269,8 +272,8 @@ impl Assets { self.non_fungible.is_superset(&assets.non_fungible) } - /// Returns an error unless all `assets` are contained in `self`. In the case of an error, the first asset in - /// `assets` which is not wholly in `self` is returned. + /// Returns an error unless all `assets` are contained in `self`. In the case of an error, the + /// first asset in `assets` which is not wholly in `self` is returned. pub fn ensure_contains(&self, assets: &MultiAssets) -> Result<(), TakeError> { for asset in assets.inner().iter() { match asset { @@ -292,16 +295,17 @@ impl Assets { /// Mutates `self` to its original value less `mask` and returns assets that were removed. /// - /// If `saturate` is `true`, then `self` is considered to be masked by `mask`, thereby avoiding any attempt at - /// reducing it by assets it does not contain. In this case, the function is infallible. If `saturate` is `false` - /// and `mask` references a definite asset which `self` does not contain then an error is returned. + /// If `saturate` is `true`, then `self` is considered to be masked by `mask`, thereby avoiding + /// any attempt at reducing it by assets it does not contain. 
In this case, the function is + /// infallible. If `saturate` is `false` and `mask` references a definite asset which `self` + /// does not contain then an error is returned. /// /// The number of unique assets which are removed will respect the `count` parameter in the /// counted wildcard variants. /// - /// Returns `Ok` with the definite assets token from `self` and mutates `self` to its value minus - /// `mask`. Returns `Err` in the non-saturating case where `self` did not contain (enough of) a definite asset to - /// be removed. + /// Returns `Ok` with the definite assets token from `self` and mutates `self` to its value + /// minus `mask`. Returns `Err` in the non-saturating case where `self` did not contain (enough + /// of) a definite asset to be removed. fn general_take( &mut self, mask: MultiAssetFilter, @@ -386,24 +390,27 @@ impl Assets { Ok(taken) } - /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least `mask`. + /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least + /// `mask`. /// - /// Returns `Ok` with the non-wildcard equivalence of `mask` taken and mutates `self` to its value minus - /// `mask` if `self` contains `asset`, and return `Err` otherwise. + /// Returns `Ok` with the non-wildcard equivalence of `mask` taken and mutates `self` to its + /// value minus `mask` if `self` contains `asset`, and return `Err` otherwise. pub fn saturating_take(&mut self, asset: MultiAssetFilter) -> Assets { self.general_take(asset, true) .expect("general_take never results in error when saturating") } - /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least `mask`. + /// Mutates `self` to its original value less `mask` and returns `true` iff it contains at least + /// `mask`. /// - /// Returns `Ok` with the non-wildcard equivalence of `asset` taken and mutates `self` to its value minus - /// `asset` if `self` contains `asset`, and return `Err` otherwise. + /// Returns `Ok` with the non-wildcard equivalence of `asset` taken and mutates `self` to its + /// value minus `asset` if `self` contains `asset`, and return `Err` otherwise. pub fn try_take(&mut self, mask: MultiAssetFilter) -> Result { self.general_take(mask, false) } - /// Consumes `self` and returns its original value excluding `asset` iff it contains at least `asset`. + /// Consumes `self` and returns its original value excluding `asset` iff it contains at least + /// `asset`. pub fn checked_sub(mut self, asset: MultiAsset) -> Result { match asset.fun { Fungible(amount) => { diff --git a/xcm/xcm-executor/src/lib.rs b/xcm/xcm-executor/src/lib.rs index 57ddc4322923..a48cd3259d67 100644 --- a/xcm/xcm-executor/src/lib.rs +++ b/xcm/xcm-executor/src/lib.rs @@ -356,7 +356,8 @@ impl XcmExecutor { } /// Execute any final operations after having executed the XCM message. - /// This includes refunding surplus weight, trapping extra holding funds, and returning any errors during execution. + /// This includes refunding surplus weight, trapping extra holding funds, and returning any + /// errors during execution. pub fn post_process(mut self, xcm_weight: Weight) -> Outcome { // We silently drop any error from our attempt to refund the surplus as it's a charitable // thing so best-effort is all we will do. 
@@ -533,9 +534,10 @@ impl XcmExecutor { Config::IsTeleporter::contains(asset, &origin), XcmError::UntrustedTeleportLocation ); - // We should check that the asset can actually be teleported in (for this to be in error, there - // would need to be an accounting violation by one of the trusted chains, so it's unlikely, but we - // don't want to punish a possibly innocent chain/user). + // We should check that the asset can actually be teleported in (for this to be + // in error, there would need to be an accounting violation by one of the + // trusted chains, so it's unlikely, but we don't want to punish a possibly + // innocent chain/user). Config::AssetTransactor::can_check_in(&origin, asset, &self.context)?; } for asset in assets.into_inner().into_iter() { @@ -603,8 +605,8 @@ impl XcmExecutor { Ok(()) }, ReportError(response_info) => { - // Report the given result by sending a QueryResponse XCM to a previously given outcome - // destination if one was registered. + // Report the given result by sending a QueryResponse XCM to a previously given + // outcome destination if one was registered. self.respond( self.cloned_origin(), Response::ExecutionResult(self.error), @@ -823,10 +825,12 @@ impl XcmExecutor { Ok(()) }, ExportMessage { network, destination, xcm } => { - // The actual message sent to the bridge for forwarding is prepended with `UniversalOrigin` - // and `DescendOrigin` in order to ensure that the message is executed with this Origin. + // The actual message sent to the bridge for forwarding is prepended with + // `UniversalOrigin` and `DescendOrigin` in order to ensure that the message is + // executed with this Origin. // - // Prepend the desired message with instructions which effectively rewrite the origin. + // Prepend the desired message with instructions which effectively rewrite the + // origin. // // This only works because the remote chain empowers the bridge // to speak for the local network. diff --git a/xcm/xcm-executor/src/traits/asset_exchange.rs b/xcm/xcm-executor/src/traits/asset_exchange.rs index 465468992ae4..0cb188d348de 100644 --- a/xcm/xcm-executor/src/traits/asset_exchange.rs +++ b/xcm/xcm-executor/src/traits/asset_exchange.rs @@ -24,8 +24,8 @@ pub trait AssetExchange { /// - `origin`: The location attempting the exchange; this should generally not matter. /// - `give`: The assets which have been removed from the caller. /// - `want`: The minimum amount of assets which should be given to the caller in case any - /// exchange happens. If more assets are provided, then they should generally be of the - /// same asset class if at all possible. + /// exchange happens. If more assets are provided, then they should generally be of the same + /// asset class if at all possible. /// - `maximal`: If `true`, then as much as possible should be exchanged. /// /// `Ok` is returned along with the new set of assets which have been exchanged for `give`. At diff --git a/xcm/xcm-executor/src/traits/asset_lock.rs b/xcm/xcm-executor/src/traits/asset_lock.rs index bb19e90b0c36..b5a2b22f5fc5 100644 --- a/xcm/xcm-executor/src/traits/asset_lock.rs +++ b/xcm/xcm-executor/src/traits/asset_lock.rs @@ -69,8 +69,8 @@ pub trait AssetLock { /// unlock. type UnlockTicket: Enact; - /// `Enact` implementer for `prepare_reduce_unlockable`. This type may be dropped safely to avoid doing the - /// unlock. + /// `Enact` implementer for `prepare_reduce_unlockable`. This type may be dropped safely to + /// avoid doing the unlock. type ReduceTicket: Enact; /// Prepare to lock an asset. 
On success, a `Self::LockTicket` it returned, which can be used diff --git a/xcm/xcm-executor/src/traits/conversion.rs b/xcm/xcm-executor/src/traits/conversion.rs index 2f584a900f69..dac099ffaf8e 100644 --- a/xcm/xcm-executor/src/traits/conversion.rs +++ b/xcm/xcm-executor/src/traits/conversion.rs @@ -40,9 +40,9 @@ impl ConvertLocation for Tuple { /// A converter `trait` for origin types. /// -/// Can be amalgamated into tuples. If any of the tuple elements returns `Ok(_)`, it short circuits. Else, the `Err(_)` -/// of the last tuple item is returned. Each intermediate `Err(_)` might return a different `origin` of type `Origin` -/// which is passed to the next convert item. +/// Can be amalgamated into tuples. If any of the tuple elements returns `Ok(_)`, it short circuits. +/// Else, the `Err(_)` of the last tuple item is returned. Each intermediate `Err(_)` might return a +/// different `origin` of type `Origin` which is passed to the next convert item. /// /// ```rust /// # use xcm::latest::{MultiLocation, Junctions, Junction, OriginKind}; diff --git a/xcm/xcm-executor/src/traits/filter_asset_location.rs b/xcm/xcm-executor/src/traits/filter_asset_location.rs index 7aeb26b28094..b162a8b0729d 100644 --- a/xcm/xcm-executor/src/traits/filter_asset_location.rs +++ b/xcm/xcm-executor/src/traits/filter_asset_location.rs @@ -19,7 +19,8 @@ use xcm::latest::{MultiAsset, MultiLocation}; /// Filters assets/location pairs. /// -/// Can be amalgamated into tuples. If any item returns `true`, it short-circuits, else `false` is returned. +/// Can be amalgamated into tuples. If any item returns `true`, it short-circuits, else `false` is +/// returned. #[deprecated = "Use `frame_support::traits::ContainsPair` instead"] pub trait FilterAssetLocation { /// A filter to distinguish between asset/location pairs. diff --git a/xcm/xcm-executor/src/traits/on_response.rs b/xcm/xcm-executor/src/traits/on_response.rs index 34bb7eb9597d..b0f8b35bb98f 100644 --- a/xcm/xcm-executor/src/traits/on_response.rs +++ b/xcm/xcm-executor/src/traits/on_response.rs @@ -107,11 +107,14 @@ impl VersionChangeNotifier for () { /// The possible state of an XCM query response. #[derive(Debug, PartialEq, Eq)] pub enum QueryResponseStatus { - /// The response has arrived, and includes the inner Response and the block number it arrived at. + /// The response has arrived, and includes the inner Response and the block number it arrived + /// at. Ready { response: Response, at: BlockNumber }, - /// The response has not yet arrived, the XCM might still be executing or the response might be in transit. + /// The response has not yet arrived, the XCM might still be executing or the response might be + /// in transit. Pending { timeout: BlockNumber }, - /// No response with the given `QueryId` was found, or the response was already queried and removed from local storage. + /// No response with the given `QueryId` was found, or the response was already queried and + /// removed from local storage. NotFound, /// Got an unexpected XCM version. UnexpectedVersion, @@ -144,7 +147,8 @@ pub trait QueryHandler { /// /// - `message`: The message whose outcome should be reported. /// - `responder`: The origin from which a response should be expected. - /// - `timeout`: The block number after which it is permissible to return `NotFound` from `take_response`. + /// - `timeout`: The block number after which it is permissible to return `NotFound` from + /// `take_response`. /// /// `report_outcome` may return an error if the `responder` is not invertible. 
/// diff --git a/xcm/xcm-executor/src/traits/should_execute.rs b/xcm/xcm-executor/src/traits/should_execute.rs index 2b634e375136..d85458b54709 100644 --- a/xcm/xcm-executor/src/traits/should_execute.rs +++ b/xcm/xcm-executor/src/traits/should_execute.rs @@ -32,8 +32,8 @@ pub struct Properties { /// Trait to determine whether the execution engine should actually execute a given XCM. /// -/// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns `Ok()`, the -/// execution stops. Else, `Err(_)` is returned if all elements reject the message. +/// Can be amalgamated into a tuple to have multiple trials. If any of the tuple elements returns +/// `Ok()`, the execution stops. Else, `Err(_)` is returned if all elements reject the message. pub trait ShouldExecute { /// Returns `true` if the given `message` may be executed. /// diff --git a/xcm/xcm-executor/src/traits/transact_asset.rs b/xcm/xcm-executor/src/traits/transact_asset.rs index 832397a0fd25..34cdb0c71413 100644 --- a/xcm/xcm-executor/src/traits/transact_asset.rs +++ b/xcm/xcm-executor/src/traits/transact_asset.rs @@ -20,11 +20,13 @@ use xcm::latest::{Error as XcmError, MultiAsset, MultiLocation, Result as XcmRes /// Facility for asset transacting. /// -/// This should work with as many asset/location combinations as possible. Locations to support may include non-account -/// locations such as a `MultiLocation::X1(Junction::Parachain)`. Different chains may handle them in different ways. +/// This should work with as many asset/location combinations as possible. Locations to support may +/// include non-account locations such as a `MultiLocation::X1(Junction::Parachain)`. Different +/// chains may handle them in different ways. /// -/// Can be amalgamated as a tuple of items that implement this trait. In such executions, if any of the transactors -/// returns `Ok(())`, then it will short circuit. Else, execution is passed to the next transactor. +/// Can be amalgamated as a tuple of items that implement this trait. In such executions, if any of +/// the transactors returns `Ok(())`, then it will short circuit. Else, execution is passed to the +/// next transactor. pub trait TransactAsset { /// Ensure that `check_in` will do as expected. /// @@ -37,19 +39,23 @@ pub trait TransactAsset { Err(XcmError::Unimplemented) } - /// An asset has been teleported in from the given origin. This should do whatever housekeeping is needed. + /// An asset has been teleported in from the given origin. This should do whatever housekeeping + /// is needed. /// - /// NOTE: This will make only a best-effort at bookkeeping. The caller should ensure that `can_check_in` has - /// returned with `Ok` in order to guarantee that this operation proceeds properly. + /// NOTE: This will make only a best-effort at bookkeeping. The caller should ensure that + /// `can_check_in` has returned with `Ok` in order to guarantee that this operation proceeds + /// properly. /// - /// Implementation note: In general this will do one of two things: On chains where the asset is native, - /// it will reduce the assets from a special "teleported" account so that a) total-issuance is preserved; - /// and b) to ensure that no more assets can be teleported in than were teleported out overall (this should - /// not be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains - /// where the asset is not native then it will generally just be a no-op. 
+ /// Implementation note: In general this will do one of two things: On chains where the asset is + /// native, it will reduce the assets from a special "teleported" account so that a) + /// total-issuance is preserved; and b) to ensure that no more assets can be teleported in than + /// were teleported out overall (this should not be needed if the teleporting chains are to be + /// trusted, but better to be safe than sorry). On chains where the asset is not native then it + /// will generally just be a no-op. /// - /// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no - /// value for `_what` which can cause side-effects for more than one of the type-items. + /// When composed as a tuple, all type-items are called. It is up to the implementer that there + /// exists no value for `_what` which can cause side-effects for more than one of the + /// type-items. fn check_in(_origin: &MultiLocation, _what: &MultiAsset, _context: &XcmContext) {} /// Ensure that `check_out` will do as expected. @@ -63,16 +69,19 @@ pub trait TransactAsset { Err(XcmError::Unimplemented) } - /// An asset has been teleported out to the given destination. This should do whatever housekeeping is needed. + /// An asset has been teleported out to the given destination. This should do whatever + /// housekeeping is needed. /// - /// Implementation note: In general this will do one of two things: On chains where the asset is native, - /// it will increase the assets in a special "teleported" account so that a) total-issuance is preserved; and - /// b) to ensure that no more assets can be teleported in than were teleported out overall (this should not - /// be needed if the teleporting chains are to be trusted, but better to be safe than sorry). On chains where - /// the asset is not native then it will generally just be a no-op. + /// Implementation note: In general this will do one of two things: On chains where the asset is + /// native, it will increase the assets in a special "teleported" account so that a) + /// total-issuance is preserved; and b) to ensure that no more assets can be teleported in than + /// were teleported out overall (this should not be needed if the teleporting chains are to be + /// trusted, but better to be safe than sorry). On chains where the asset is not native then it + /// will generally just be a no-op. /// - /// When composed as a tuple, all type-items are called. It is up to the implementer that there exists no - /// value for `_what` which can cause side-effects for more than one of the type-items. + /// When composed as a tuple, all type-items are called. It is up to the implementer that there + /// exists no value for `_what` which can cause side-effects for more than one of the + /// type-items. fn check_out(_dest: &MultiLocation, _what: &MultiAsset, _context: &XcmContext) {} /// Deposit the `what` asset into the account of `who`. diff --git a/xcm/xcm-executor/src/traits/weight.rs b/xcm/xcm-executor/src/traits/weight.rs index 06e6b5f55bce..bc40c10074f5 100644 --- a/xcm/xcm-executor/src/traits/weight.rs +++ b/xcm/xcm-executor/src/traits/weight.rs @@ -56,8 +56,8 @@ pub trait WeightTrader: Sized { context: &XcmContext, ) -> Result; - /// Attempt a refund of `weight` into some asset. The caller does not guarantee that the weight was - /// purchased using `buy_weight`. + /// Attempt a refund of `weight` into some asset. The caller does not guarantee that the weight + /// was purchased using `buy_weight`. 
/// /// Default implementation refunds nothing. fn refund_weight(&mut self, _weight: Weight, _context: &XcmContext) -> Option { @@ -93,8 +93,8 @@ impl WeightTrader for Tuple { log::trace!(target: "xcm::buy_weight", "last_error: {:?}, too_expensive_error_found: {}", last_error, too_expensive_error_found); - // if we have multiple traders, and first one returns `TooExpensive` and others fail e.g. `AssetNotFound` - // then it is more accurate to return `TooExpensive` then `AssetNotFound` + // if we have multiple traders, and first one returns `TooExpensive` and others fail e.g. + // `AssetNotFound` then it is more accurate to return `TooExpensive` then `AssetNotFound` Err(if too_expensive_error_found { XcmError::TooExpensive } else { diff --git a/xcm/xcm-simulator/src/lib.rs b/xcm/xcm-simulator/src/lib.rs index f98eb6e571e6..cf56784f7d4e 100644 --- a/xcm/xcm-simulator/src/lib.rs +++ b/xcm/xcm-simulator/src/lib.rs @@ -161,12 +161,12 @@ macro_rules! decl_test_relay_chain { /// /// ```ignore /// decl_test_parachain! { -/// pub struct ParaA { -/// Runtime = parachain::Runtime, -/// XcmpMessageHandler = parachain::MsgQueue, -/// DmpMessageHandler = parachain::MsgQueue, -/// new_ext = para_ext(), -/// } +/// pub struct ParaA { +/// Runtime = parachain::Runtime, +/// XcmpMessageHandler = parachain::MsgQueue, +/// DmpMessageHandler = parachain::MsgQueue, +/// new_ext = para_ext(), +/// } /// } /// ``` #[macro_export] @@ -272,13 +272,13 @@ thread_local! { /// /// ```ignore /// decl_test_network! { -/// pub struct ExampleNet { -/// relay_chain = Relay, -/// parachains = vec![ -/// (1, ParaA), -/// (2, ParaB), -/// ], -/// } +/// pub struct ExampleNet { +/// relay_chain = Relay, +/// parachains = vec![ +/// (1, ParaA), +/// (2, ParaB), +/// ], +/// } /// } /// ``` #[macro_export] From 1a50cb1aede201e08df18d9deb501e32f833f40b Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 14 Aug 2023 17:34:38 +0300 Subject: [PATCH 12/27] Disable validation/collation protocols for normal full nodes (#7601) If authority discovery is not enabled, `Overseer` is not enabled, meaning `NetworkBridge` is not started. Validation/collation protocols are, however, enabled even if the `NetworkBridge` is not started. Currently this results in normal Polkadot full nodes advertising these protocols, accepting inbound substreams and even establishing outbound substreams for the validation protocol. Since the `NetworkBridge` is not started and no protocol in Substrate is interested in these protocol events, the events are relayed to all protocol handlers but are getting discarded because no installed protocol is interested in them. 
Co-authored-by: parity-processbot <> --- node/service/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 4dda57110825..d0b6db17ed0e 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -828,10 +828,11 @@ pub fn new_full( net_config.add_request_response_protocol(beefy_req_resp_cfg); } + // validation/collation protocols are enabled only if `Overseer` is enabled let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - { + if auth_or_collator || overseer_enable_anyways { use polkadot_network_bridge::{peer_sets_info, IsAuthority}; let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; for config in peer_sets_info(is_authority, &peerset_protocol_names) { From b10c8af5db47d549fd60a349db2ce7547e7cce2d Mon Sep 17 00:00:00 2001 From: Lulu Date: Mon, 14 Aug 2023 16:31:13 +0100 Subject: [PATCH 13/27] Don't publish test crates (#7588) --- node/subsystem-test-helpers/Cargo.toml | 1 + node/test/client/Cargo.toml | 1 + node/test/performance-test/Cargo.toml | 1 + node/test/service/Cargo.toml | 1 + parachain/test-parachains/adder/Cargo.toml | 1 + parachain/test-parachains/adder/collator/Cargo.toml | 1 + parachain/test-parachains/halt/Cargo.toml | 1 + parachain/test-parachains/undying/Cargo.toml | 1 + parachain/test-parachains/undying/collator/Cargo.toml | 1 + primitives/test-helpers/Cargo.toml | 1 + runtime/test-runtime/Cargo.toml | 1 + runtime/test-runtime/constants/Cargo.toml | 1 + utils/remote-ext-tests/bags-list/Cargo.toml | 1 + xcm/xcm-executor/integration-tests/Cargo.toml | 1 + 14 files changed, 14 insertions(+) diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index 81bc19a13031..adb0587370ec 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-node-subsystem-test-helpers" description = "Subsystem traits and message definitions" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/test/client/Cargo.toml b/node/test/client/Cargo.toml index 33c240443d02..aac46bd4b8fc 100644 --- a/node/test/client/Cargo.toml +++ b/node/test/client/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-test-client" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/test/performance-test/Cargo.toml b/node/test/performance-test/Cargo.toml index c6d0ce7f7ec9..1bddc6b08702 100644 --- a/node/test/performance-test/Cargo.toml +++ b/node/test/performance-test/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-performance-test" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index 08e9e3889b06..8912e19306e0 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-test-service" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/adder/Cargo.toml b/parachain/test-parachains/adder/Cargo.toml index 5e1b9a7d174c..d2b2224328a7 100644 --- a/parachain/test-parachains/adder/Cargo.toml +++ b/parachain/test-parachains/adder/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true license.workspace = true version.workspace = true authors.workspace = true +publish = false 
[dependencies] parachain = { package = "polkadot-parachain", path = "../../", default-features = false, features = [ "wasm-api" ] } diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml index 08dcbcaa644e..fad51a863a15 100644 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "test-parachain-adder-collator" description = "Collator for the adder test parachain" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/halt/Cargo.toml b/parachain/test-parachains/halt/Cargo.toml index 99076aae6aa3..85ee5d99d891 100644 --- a/parachain/test-parachains/halt/Cargo.toml +++ b/parachain/test-parachains/halt/Cargo.toml @@ -2,6 +2,7 @@ name = "test-parachain-halt" description = "Test parachain which executes forever" build = "build.rs" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/undying/Cargo.toml b/parachain/test-parachains/undying/Cargo.toml index 43cb1bc37fda..030032e7754d 100644 --- a/parachain/test-parachains/undying/Cargo.toml +++ b/parachain/test-parachains/undying/Cargo.toml @@ -2,6 +2,7 @@ name = "test-parachain-undying" description = "Test parachain for zombienet integration tests" build = "build.rs" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/parachain/test-parachains/undying/collator/Cargo.toml b/parachain/test-parachains/undying/collator/Cargo.toml index 5b5656efb4ac..b0118555506c 100644 --- a/parachain/test-parachains/undying/collator/Cargo.toml +++ b/parachain/test-parachains/undying/collator/Cargo.toml @@ -5,6 +5,7 @@ edition.workspace = true license.workspace = true version.workspace = true authors.workspace = true +publish = false [[bin]] name = "undying-collator" diff --git a/primitives/test-helpers/Cargo.toml b/primitives/test-helpers/Cargo.toml index a1f7f9268b9f..b43bac1e8550 100644 --- a/primitives/test-helpers/Cargo.toml +++ b/primitives/test-helpers/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-primitives-test-helpers" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index 76bd63d59462..41fbebb39f3a 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-test-runtime" build = "build.rs" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/runtime/test-runtime/constants/Cargo.toml b/runtime/test-runtime/constants/Cargo.toml index 9b435da80682..15ab1dbdd4fe 100644 --- a/runtime/test-runtime/constants/Cargo.toml +++ b/runtime/test-runtime/constants/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "test-runtime-constants" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/utils/remote-ext-tests/bags-list/Cargo.toml b/utils/remote-ext-tests/bags-list/Cargo.toml index 772efb1eddd0..c84c95ab0498 100644 --- a/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/utils/remote-ext-tests/bags-list/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "remote-ext-tests-bags-list" +publish = false version.workspace = true authors.workspace = true edition.workspace = true diff --git a/xcm/xcm-executor/integration-tests/Cargo.toml 
b/xcm/xcm-executor/integration-tests/Cargo.toml index d2af1304beb6..18a729e082d2 100644 --- a/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/xcm/xcm-executor/integration-tests/Cargo.toml @@ -5,6 +5,7 @@ authors.workspace = true edition.workspace = true license.workspace = true version.workspace = true +publish = false [dependencies] frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } From f2ad8c531c96c32437df041b02c757edb69d3e4c Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 14 Aug 2023 12:14:30 -0400 Subject: [PATCH 14/27] PVF workers: some fixes for cargo run and cargo install (#7608) - Update some places where `cargo run` was used - Add note to error messages about `cargo build` before `cargo run` - Fix call to `cargo install` in readme --- README.md | 7 ++++++- node/service/src/lib.rs | 4 ++-- parachain/test-parachains/adder/collator/README.md | 8 +++++++- utils/staking-miner/README.md | 2 ++ 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f3d1f5e276cd..c6e969760362 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,11 @@ cargo build --release **Note:** compilation is a memory intensive process. We recommend having 4 GiB of physical RAM or swap available (keep in mind that if a build hits swap it tends to be very slow). -**Note:** if you want to move the built `polkadot` binary somewhere (e.g. into $PATH) you will also need to move `polkadot-execute-worker` and `polkadot-prepare-worker`. You can let cargo do all this for you by running `cargo install --path .`. +**Note:** if you want to move the built `polkadot` binary somewhere (e.g. into $PATH) you will also need to move `polkadot-execute-worker` and `polkadot-prepare-worker`. You can let cargo do all this for you by running: + +```sh +cargo install --path . --locked +``` #### Build from Source with Docker @@ -193,6 +197,7 @@ cargo test --workspace --release You can start a development chain with: ```bash +cargo build cargo run -- --dev ``` diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index d0b6db17ed0e..d42c737330cd 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -243,7 +243,7 @@ pub enum Error { InvalidWorkerBinaries { prep_worker_path: PathBuf, exec_worker_path: PathBuf }, #[cfg(feature = "full-node")] - #[error("Worker binaries could not be found, make sure polkadot was built/installed correctly. Searched given workers path ({given_workers_path:?}), polkadot binary path ({current_exe_path:?}), and lib path (/usr/lib/polkadot), workers names: {workers_names:?}")] + #[error("Worker binaries could not be found, make sure polkadot was built/installed correctly. If you ran with `cargo run`, please run `cargo build` first. Searched given workers path ({given_workers_path:?}), polkadot binary path ({current_exe_path:?}), and lib path (/usr/lib/polkadot), workers names: {workers_names:?}")] MissingWorkerBinaries { given_workers_path: Option, current_exe_path: PathBuf, @@ -251,7 +251,7 @@ pub enum Error { }, #[cfg(feature = "full-node")] - #[error("Version of worker binary ({worker_version}) is different from node version ({node_version}), worker_path: {worker_path}. TESTING ONLY: this check can be disabled with --disable-worker-version-check")] + #[error("Version of worker binary ({worker_version}) is different from node version ({node_version}), worker_path: {worker_path}. If you ran with `cargo run`, please run `cargo build` first, otherwise try to `cargo clean`. 
TESTING ONLY: this check can be disabled with --disable-worker-version-check")] WorkerBinaryVersionMismatch { worker_version: String, node_version: String, diff --git a/parachain/test-parachains/adder/collator/README.md b/parachain/test-parachains/adder/collator/README.md index 4347a9a8ced7..a1378544c386 100644 --- a/parachain/test-parachains/adder/collator/README.md +++ b/parachain/test-parachains/adder/collator/README.md @@ -1,6 +1,12 @@ # How to run this collator -First start two validators that will run for the relay chain: +First, build Polkadot: + +```sh +cargo build --release +``` + +Then start two validators that will run for the relay chain: ```sh cargo run --release -- -d alice --chain rococo-local --validator --alice --port 50551 diff --git a/utils/staking-miner/README.md b/utils/staking-miner/README.md index b7f70de573b0..7e7254dc7759 100644 --- a/utils/staking-miner/README.md +++ b/utils/staking-miner/README.md @@ -64,5 +64,7 @@ docker run --rm -i \ ### Test locally +Make sure you've built Polkadot, then: + 1. `cargo run -p polkadot --features fast-runtime -- --chain polkadot-dev --tmp --alice -lruntime=debug` 2. `cargo run -p staking-miner -- --uri ws://localhost:9944 monitor --seed-or-path //Alice phrag-mms` From 1a57e74ec72fe9d5f2731c25c2e3693e7a89d839 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Mon, 14 Aug 2023 14:30:12 -0700 Subject: [PATCH 15/27] XCM: Rename Instruction instructions to Command instructions (#7593) Co-authored-by: parity-processbot <> --- xcm/src/v2/mod.rs | 44 +++++++++++++------------- xcm/src/v3/mod.rs | 80 +++++++++++++++++++++++------------------------ 2 files changed, 62 insertions(+), 62 deletions(-) diff --git a/xcm/src/v2/mod.rs b/xcm/src/v2/mod.rs index 79cc8ead89a1..8a67b771c9e9 100644 --- a/xcm/src/v2/mod.rs +++ b/xcm/src/v2/mod.rs @@ -434,7 +434,7 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to be withdrawn into holding. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: WithdrawAsset(MultiAssets), @@ -492,7 +492,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferAsset { assets: MultiAssets, beneficiary: MultiLocation }, @@ -512,7 +512,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferReserveAsset { assets: MultiAssets, dest: MultiLocation, xcm: Xcm<()> }, @@ -527,7 +527,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: Transact { @@ -600,14 +600,14 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: ClearOrigin, /// Mutate the origin to some interior location. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DescendOrigin(InteriorMultiLocation), @@ -623,7 +623,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ReportError { @@ -643,7 +643,7 @@ pub enum Instruction { /// removed, prioritized under standard asset ordering. Any others will remain in holding. /// - `beneficiary`: The new owner for the assets. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositAsset { @@ -669,7 +669,7 @@ pub enum Instruction { /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is /// sent onwards to `dest`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositReserveAsset { @@ -689,7 +689,7 @@ pub enum Instruction { /// - `give`: The asset(s) to remove from holding. /// - `receive`: The minimum amount of assets(s) which `give` should be exchanged for. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ExchangeAsset { give: MultiAssetFilter, receive: MultiAssets }, @@ -705,7 +705,7 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateReserveWithdraw { assets: MultiAssetFilter, reserve: MultiLocation, xcm: Xcm<()> }, @@ -721,7 +721,7 @@ pub enum Instruction { /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for /// all `assets`. If it does not, then the assets may be lost. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateTeleport { assets: MultiAssetFilter, dest: MultiLocation, xcm: Xcm<()> }, @@ -739,7 +739,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: QueryHolding { @@ -759,14 +759,14 @@ pub enum Instruction { /// expected maximum weight of the total XCM to be executed for the /// `AllowTopLevelPaidExecutionFrom` barrier to allow the XCM be executed. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: BuyExecution { fees: MultiAsset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. RefundSurplus, @@ -782,7 +782,7 @@ pub enum Instruction { /// weight however includes only the difference between the previous handler and the new /// handler, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetErrorHandler(Xcm), @@ -798,14 +798,14 @@ pub enum Instruction { /// weight however includes only the difference between the previous appendix and the new /// appendix, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetAppendix(Xcm), /// Clear the Error Register. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. ClearError, @@ -817,14 +817,14 @@ pub enum Instruction { /// - `ticket`: The ticket of the asset; this is an abstract identifier to help locate the /// asset. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, /// Always throws an error of type `Trap`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `Trap`: All circumstances, whose inner value is the same as this item's inner value. @@ -839,7 +839,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* SubscribeVersion { @@ -851,7 +851,7 @@ pub enum Instruction { /// Cancel the effect of a previous `SubscribeVersion` instruction. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* UnsubscribeVersion, diff --git a/xcm/src/v3/mod.rs b/xcm/src/v3/mod.rs index 3614dc22550d..360867957862 100644 --- a/xcm/src/v3/mod.rs +++ b/xcm/src/v3/mod.rs @@ -380,7 +380,7 @@ pub enum Instruction { /// /// - `assets`: The asset(s) to be withdrawn into holding. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: WithdrawAsset(MultiAssets), @@ -444,7 +444,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferAsset { assets: MultiAssets, beneficiary: MultiLocation }, @@ -464,7 +464,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: TransferReserveAsset { assets: MultiAssets, dest: MultiLocation, xcm: Xcm<()> }, @@ -481,7 +481,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded }, @@ -549,14 +549,14 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: ClearOrigin, /// Mutate the origin to some interior location. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DescendOrigin(InteriorMultiLocation), @@ -567,7 +567,7 @@ pub enum Instruction { /// /// - `response_info`: Information for making the response. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ReportError(QueryResponseInfo), @@ -578,7 +578,7 @@ pub enum Instruction { /// - `assets`: The asset(s) to remove from holding. /// - `beneficiary`: The new owner for the assets. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositAsset { assets: MultiAssetFilter, beneficiary: MultiLocation }, @@ -596,7 +596,7 @@ pub enum Instruction { /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is /// sent onwards to `dest`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: DepositReserveAsset { assets: MultiAssetFilter, dest: MultiLocation, xcm: Xcm<()> }, @@ -613,7 +613,7 @@ pub enum Instruction { /// and receive accordingly more. If `false`, then prefer to give as little as possible in /// order to receive as little as possible while receiving at least `want`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ExchangeAsset { give: MultiAssetFilter, want: MultiAssets, maximal: bool }, @@ -629,7 +629,7 @@ pub enum Instruction { /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve /// location*. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateReserveWithdraw { assets: MultiAssetFilter, reserve: MultiLocation, xcm: Xcm<()> }, @@ -645,7 +645,7 @@ pub enum Instruction { /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for /// all `assets`. If it does not, then the assets may be lost. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: InitiateTeleport { assets: MultiAssetFilter, dest: MultiLocation, xcm: Xcm<()> }, @@ -659,7 +659,7 @@ pub enum Instruction { /// will be, asset-wise, *the lesser of this value and the holding register*. No wildcards /// will be used when reporting assets back. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ReportHolding { response_info: QueryResponseInfo, assets: MultiAssetFilter }, @@ -672,14 +672,14 @@ pub enum Instruction { /// expected maximum weight of the total XCM to be executed for the /// `AllowTopLevelPaidExecutionFrom` barrier to allow the XCM be executed. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: BuyExecution { fees: MultiAsset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. RefundSurplus, @@ -695,7 +695,7 @@ pub enum Instruction { /// weight however includes only the difference between the previous handler and the new /// handler, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetErrorHandler(Xcm), @@ -711,14 +711,14 @@ pub enum Instruction { /// weight however includes only the difference between the previous appendix and the new /// appendix, which can reasonably be negative, which would result in a surplus. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. SetAppendix(Xcm), /// Clear the Error Register. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. ClearError, @@ -730,14 +730,14 @@ pub enum Instruction { /// - `ticket`: The ticket of the asset; this is an abstract identifier to help locate the /// asset. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, /// Always throws an error of type `Trap`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `Trap`: All circumstances, whose inner value is the same as this item's inner value. @@ -752,7 +752,7 @@ pub enum Instruction { /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the /// response may not execute at all. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* SubscribeVersion { @@ -763,7 +763,7 @@ pub enum Instruction { /// Cancel the effect of a previous `SubscribeVersion` instruction. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible* UnsubscribeVersion, @@ -774,14 +774,14 @@ pub enum Instruction { /// error if the Holding does not contain the assets (to make this an error, use `ExpectAsset` /// prior). /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Infallible* BurnAsset(MultiAssets), /// Throw an error if Holding does not contain at least the given assets. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If Holding Register does not contain the assets in the parameter. @@ -789,7 +789,7 @@ pub enum Instruction { /// Ensure that the Origin Register equals some given value and throw an error if not. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If Origin Register is not equal to the parameter. @@ -797,7 +797,7 @@ pub enum Instruction { /// Ensure that the Error Register equals some given value and throw an error if not. 
/// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If the value of the Error Register is not equal to the parameter. @@ -806,7 +806,7 @@ pub enum Instruction { /// Ensure that the Transact Status Register equals some given value and throw an error if /// not. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the @@ -824,7 +824,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. QueryPallet { module_name: Vec, response_info: QueryResponseInfo }, @@ -843,7 +843,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: /// - `ExpectationFalse`: In case any of the expectations are broken. @@ -866,7 +866,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. ReportTransactStatus(QueryResponseInfo), @@ -875,7 +875,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Infallible*. ClearTransactStatus, @@ -890,7 +890,7 @@ pub enum Instruction { /// The `Junction` parameter should generally be a `GlobalConsensus` variant since it is only /// these which are children of the Universal Ancestor. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. UniversalOrigin(Junction), @@ -911,7 +911,7 @@ pub enum Instruction { /// `destination: X1(Parachain(1000))`. Alternatively, to export a message for execution on /// Polkadot, you would call with `network: NetworkId:: Polkadot` and `destination: Here`. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: *Fallible*. ExportMessage { network: NetworkId, destination: InteriorMultiLocation, xcm: Xcm<()> }, @@ -927,7 +927,7 @@ pub enum Instruction { /// - `unlocker`: The value which the Origin must be for a corresponding `UnlockAsset` /// instruction to work. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: LockAsset { asset: MultiAsset, unlocker: MultiLocation }, @@ -940,7 +940,7 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: UnlockAsset { asset: MultiAsset, target: MultiLocation }, @@ -969,7 +969,7 @@ pub enum Instruction { /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which an /// `UnlockAsset` should be sent. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: RequestUnlock { asset: MultiAsset, locker: MultiLocation }, @@ -979,7 +979,7 @@ pub enum Instruction { /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions are /// withdrawn as needed using the same mechanism as `WithdrawAssets`. /// - /// Kind: *Instruction*. + /// Kind: *Command*. /// /// Errors: SetFeesMode { jit_withdraw: bool }, @@ -992,21 +992,21 @@ pub enum Instruction { /// /// Safety: No concerns. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: SetTopic([u8; 32]), /// Clear the Topic Register. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: None. ClearTopic, /// Alter the current Origin to another given origin. /// - /// Kind: *Instruction* + /// Kind: *Command* /// /// Errors: If the existing state would not allow such a change. 
AliasOrigin(MultiLocation), From e074364b08d07266aaba952004456d5af61dbc5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 15 Aug 2023 10:51:27 +0200 Subject: [PATCH 16/27] Remove superflous parameter `overseer_enable_anyways` and make parachain node type more explicit (#7617) * Remove superflous parameter `overseer_enable_anyways` We don't need this flag, as we don't need the overseer enabled when the node isn't a collator or validator. * Rename `IsCollator` to `IsParachainNode` `IsParachainNode` is more expressive and also encapsulates the state of the parachain node being a full node. Some functionality like the overseer needs to run always when the node runs alongside a parachain node. The parachain node needs the overseer to e.g. recover PoVs. Other things like candidate validation or pvf checking are only required for when the node is running as validator. * FMT * Fix CI --- cli/src/command.rs | 3 +- node/core/approval-voting/src/lib.rs | 2 +- node/core/pvf/execute-worker/src/lib.rs | 4 +- node/network/approval-distribution/src/lib.rs | 14 +- .../approval-distribution/src/tests.rs | 4 +- node/network/collator-protocol/src/lib.rs | 28 ++-- node/network/gossip-support/src/lib.rs | 3 +- .../statement-distribution/src/responder.rs | 4 +- node/service/src/lib.rs | 146 ++++++++++-------- node/service/src/overseer.rs | 13 +- node/test/service/src/lib.rs | 11 +- .../adder/collator/src/main.rs | 9 +- .../undying/collator/src/main.rs | 9 +- runtime/parachains/src/configuration.rs | 19 ++- runtime/parachains/src/paras_inherent/mod.rs | 4 +- .../parachains/src/runtime_api_impl/mod.rs | 5 +- 16 files changed, 148 insertions(+), 130 deletions(-) diff --git a/cli/src/command.rs b/cli/src/command.rs index c75f96ee2ebf..dcffa09aaf91 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -287,14 +287,13 @@ where let task_manager = service::build_full( config, service::NewFullParams { - is_collator: service::IsCollator::No, + is_parachain_node: service::IsParachainNode::No, grandpa_pause, jaeger_agent, telemetry_worker_handle: None, node_version, workers_path: cli.run.workers_path, workers_names: None, - overseer_enable_anyways: false, overseer_gen, overseer_message_channel_capacity_override: cli .run diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 7e29e64c400a..b29e47b4c435 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -2253,7 +2253,7 @@ where // // 1. This is not a local approval, as we don't store anything new in the approval entry. // 2. The candidate is not newly approved, as we haven't altered the approval entry's - // approved flag with `mark_approved` above. + // approved flag with `mark_approved` above. // 3. The approver, if any, had already approved the candidate, as we haven't altered the // bitfield. if transition.is_local_approval() || newly_approved || !already_approved_by.unwrap_or(true) diff --git a/node/core/pvf/execute-worker/src/lib.rs b/node/core/pvf/execute-worker/src/lib.rs index 6f632a0ae95e..7a14de18a82f 100644 --- a/node/core/pvf/execute-worker/src/lib.rs +++ b/node/core/pvf/execute-worker/src/lib.rs @@ -55,8 +55,8 @@ use tokio::{io, net::UnixStream}; // // There are quirks to that configuration knob: // -// 1. It only limits the amount of stack space consumed by wasm but does not ensure nor check -// that the stack space is actually available. +// 1. 
It only limits the amount of stack space consumed by wasm but does not ensure nor check that +// the stack space is actually available. // // That means, if the calling thread has 1 MiB of stack space left and the wasm code consumes // more, then the wasmtime limit will **not** trigger. Instead, the wasm code will hit the diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 803a56251495..b94ebb282219 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1319,13 +1319,13 @@ impl State { } // Here we're leaning on a few behaviors of assignment propagation: - // 1. At this point, the only peer we're aware of which has the approval - // message is the source peer. - // 2. We have sent the assignment message to every peer in the required routing - // which is aware of this block _unless_ the peer we originally received the - // assignment from was part of the required routing. In that case, we've sent - // the assignment to all aware peers in the required routing _except_ the original - // source of the assignment. Hence the `in_topology_check`. + // 1. At this point, the only peer we're aware of which has the approval message is + // the source peer. + // 2. We have sent the assignment message to every peer in the required routing which + // is aware of this block _unless_ the peer we originally received the assignment + // from was part of the required routing. In that case, we've sent the assignment + // to all aware peers in the required routing _except_ the original source of the + // assignment. Hence the `in_topology_check`. // 3. Any randomly selected peers have been sent the assignment already. let in_topology = topology .map_or(false, |t| t.local_grid_neighbors().route_to_peer(required_routing, peer)); diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 979f0ada4ee6..422157a1eda9 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -463,8 +463,8 @@ fn delay_reputation_change() { /// /// /// 1. Send a view update that removes block B from their view. -/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for, -/// but then they receive `BENEFIT_VALID_MESSAGE`. +/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for, but then they receive +/// `BENEFIT_VALID_MESSAGE`. /// 3. Send all other messages related to B. #[test] fn spam_attack_results_in_negative_reputation_change() { diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 8e710a26ad71..68d882be6fa1 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -37,7 +37,7 @@ use polkadot_node_network_protocol::{ }; use polkadot_primitives::CollatorPair; -use polkadot_node_subsystem::{errors::SubsystemError, overseer, SpawnedSubsystem}; +use polkadot_node_subsystem::{errors::SubsystemError, overseer, DummySubsystem, SpawnedSubsystem}; mod error; @@ -82,6 +82,8 @@ pub enum ProtocolSide { IncomingRequestReceiver, collator_side::Metrics, ), + /// No protocol side, just disable it. + None, } /// The collator protocol subsystem. 
@@ -98,24 +100,22 @@ impl CollatorProtocolSubsystem { pub fn new(protocol_side: ProtocolSide) -> Self { Self { protocol_side } } - - async fn run(self, ctx: Context) -> std::result::Result<(), error::FatalError> { - match self.protocol_side { - ProtocolSide::Validator { keystore, eviction_policy, metrics } => - validator_side::run(ctx, keystore, eviction_policy, metrics).await, - ProtocolSide::Collator(local_peer_id, collator_pair, req_receiver, metrics) => - collator_side::run(ctx, local_peer_id, collator_pair, req_receiver, metrics).await, - } - } } #[overseer::subsystem(CollatorProtocol, error=SubsystemError, prefix=self::overseer)] impl CollatorProtocolSubsystem { fn start(self, ctx: Context) -> SpawnedSubsystem { - let future = self - .run(ctx) - .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) - .boxed(); + let future = match self.protocol_side { + ProtocolSide::Validator { keystore, eviction_policy, metrics } => + validator_side::run(ctx, keystore, eviction_policy, metrics) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(), + ProtocolSide::Collator(local_peer_id, collator_pair, req_receiver, metrics) => + collator_side::run(ctx, local_peer_id, collator_pair, req_receiver, metrics) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(), + ProtocolSide::None => return DummySubsystem.start(ctx), + }; SpawnedSubsystem { name: "collator-protocol-subsystem", future } } diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index 3c178ad9dfa5..b92aa4e9fe39 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -183,8 +183,7 @@ where } /// 1. Determine if the current session index has changed. - /// 2. If it has, determine relevant validators - /// and issue a connection request. + /// 2. If it has, determine relevant validators and issue a connection request. async fn handle_active_leaves( &mut self, sender: &mut impl overseer::GossipSupportSenderTrait, diff --git a/node/network/statement-distribution/src/responder.rs b/node/network/statement-distribution/src/responder.rs index 4dad10eb5e4f..68976436039d 100644 --- a/node/network/statement-distribution/src/responder.rs +++ b/node/network/statement-distribution/src/responder.rs @@ -62,8 +62,8 @@ pub async fn respond( // // 1. We want some requesters to have full data fast, rather then lots of them having them // late, as each requester having the data will help distributing it. - // 2. If we take too long, the requests timing out will not yet have had any data sent, - // thus we wasted no bandwidth. + // 2. If we take too long, the requests timing out will not yet have had any data sent, thus + // we wasted no bandwidth. // 3. If the queue is full, requestes will get an immediate error instead of running in a // timeout, thus requesters can immediately try another peer and be faster. // diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index d42c737330cd..dab69473c6ba 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -627,7 +627,7 @@ where #[cfg(feature = "full-node")] pub struct NewFullParams { - pub is_collator: IsCollator, + pub is_parachain_node: IsParachainNode, pub grandpa_pause: Option<(u32, u32)>, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, @@ -638,7 +638,6 @@ pub struct NewFullParams { pub workers_path: Option, /// Optional custom names for the prepare and execute workers. 
pub workers_names: Option<(String, String)>, - pub overseer_enable_anyways: bool, pub overseer_gen: OverseerGenerator, pub overseer_message_channel_capacity_override: Option, #[allow(dead_code)] @@ -657,32 +656,46 @@ pub struct NewFull { pub backend: Arc, } -/// Is this node a collator? +/// Is this node running as in-process node for a parachain node? #[cfg(feature = "full-node")] #[derive(Clone)] -pub enum IsCollator { - /// This node is a collator. - Yes(CollatorPair), - /// This node is not a collator. +pub enum IsParachainNode { + /// This node is running as in-process node for a parachain collator. + Collator(CollatorPair), + /// This node is running as in-process node for a parachain full node. + FullNode, + /// This node is not running as in-process node for a parachain node, aka a normal relay chain + /// node. No, } #[cfg(feature = "full-node")] -impl std::fmt::Debug for IsCollator { +impl std::fmt::Debug for IsParachainNode { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { use sp_core::Pair; match self { - IsCollator::Yes(pair) => write!(fmt, "Yes({})", pair.public()), - IsCollator::No => write!(fmt, "No"), + IsParachainNode::Collator(pair) => write!(fmt, "Collator({})", pair.public()), + IsParachainNode::FullNode => write!(fmt, "FullNode"), + IsParachainNode::No => write!(fmt, "No"), } } } #[cfg(feature = "full-node")] -impl IsCollator { - /// Is this a collator? +impl IsParachainNode { + /// Is this running alongside a collator? fn is_collator(&self) -> bool { - matches!(self, Self::Yes(_)) + matches!(self, Self::Collator(_)) + } + + /// Is this running alongside a full node? + fn is_full_node(&self) -> bool { + matches!(self, Self::FullNode) + } + + /// Is this node running alongside a relay chain node? + fn is_running_alongside_parachain_node(&self) -> bool { + self.is_collator() || self.is_full_node() } } @@ -696,11 +709,6 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { /// This is an advanced feature and not recommended for general use. Generally, `build_full` is /// a better choice. /// -/// `overseer_enable_anyways` always enables the overseer, based on the provided -/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or -/// disputes-aware) is still determined based on the role of the node. Likewise for authority -/// discovery. -/// /// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside. /// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is /// searched. If the path points to an executable rather then directory, that executable is used @@ -709,14 +717,13 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { pub fn new_full( mut config: Configuration, NewFullParams { - is_collator, + is_parachain_node, grandpa_pause, jaeger_agent, telemetry_worker_handle, node_version, workers_path, workers_names, - overseer_enable_anyways, overseer_gen, overseer_message_channel_capacity_override, malus_finality_delay: _malus_finality_delay, @@ -768,8 +775,9 @@ pub fn new_full( let chain_spec = config.chain_spec.cloned_box(); let keystore = basics.keystore_container.local_keystore(); - let auth_or_collator = role.is_authority() || is_collator.is_collator(); - let pvf_checker_enabled = role.is_authority() && !is_collator.is_collator(); + let auth_or_collator = role.is_authority() || is_parachain_node.is_collator(); + // We only need to enable the pvf checker when this is a validator. 
+ let pvf_checker_enabled = role.is_authority(); let select_chain = if auth_or_collator { let metrics = @@ -832,7 +840,12 @@ pub fn new_full( let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - if auth_or_collator || overseer_enable_anyways { + // If this is a validator or running alongside a parachain node, we need to enable the + // networking protocols. + // + // Collators and parachain full nodes require the collator and validator networking to send + // collations and to be able to recover PoVs. + if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { use polkadot_network_bridge::{peer_sets_info, IsAuthority}; let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; for config in peer_sets_info(is_authority, &peerset_protocol_names) { @@ -910,7 +923,7 @@ pub fn new_full( slot_duration_millis: slot_duration.as_millis() as u64, }; - let candidate_validation_config = if role.is_authority() && !is_collator.is_collator() { + let candidate_validation_config = if role.is_authority() { let (prep_worker_path, exec_worker_path) = workers::determine_workers_paths(workers_path, workers_names, node_version.clone())?; log::info!("🚀 Using prepare-worker binary at: {:?}", prep_worker_path); @@ -979,46 +992,50 @@ pub fn new_full( let overseer_client = client.clone(); let spawner = task_manager.spawn_handle(); - let authority_discovery_service = if auth_or_collator || overseer_enable_anyways { - use futures::StreamExt; - use sc_network::{Event, NetworkEventStream}; + let authority_discovery_service = + // We need the authority discovery if this node is either a validator or running alongside a parachain node. + // Parachains node require the authority discovery for finding relay chain validators for sending + // their PoVs or recovering PoVs. + if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { + use futures::StreamExt; + use sc_network::{Event, NetworkEventStream}; - let authority_discovery_role = if role.is_authority() { - sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) + let authority_discovery_role = if role.is_authority() { + sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) + } else { + // don't publish our addresses when we're not an authority (collator, cumulus, ..) + sc_authority_discovery::Role::Discover + }; + let dht_event_stream = + network.event_stream("authority-discovery").filter_map(|e| async move { + match e { + Event::Dht(e) => Some(e), + _ => None, + } + }); + let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( + sc_authority_discovery::WorkerConfig { + publish_non_global_ips: auth_disc_publish_non_global_ips, + // Require that authority discovery records are signed. + strict_record_validation: true, + ..Default::default() + }, + client.clone(), + network.clone(), + Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); + + task_manager.spawn_handle().spawn( + "authority-discovery-worker", + Some("authority-discovery"), + Box::pin(worker.run()), + ); + Some(service) } else { - // don't publish our addresses when we're not an authority (collator, cumulus, ..) 
- sc_authority_discovery::Role::Discover + None }; - let dht_event_stream = - network.event_stream("authority-discovery").filter_map(|e| async move { - match e { - Event::Dht(e) => Some(e), - _ => None, - } - }); - let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( - sc_authority_discovery::WorkerConfig { - publish_non_global_ips: auth_disc_publish_non_global_ips, - // Require that authority discovery records are signed. - strict_record_validation: true, - ..Default::default() - }, - client.clone(), - network.clone(), - Box::pin(dht_event_stream), - authority_discovery_role, - prometheus_registry.clone(), - ); - - task_manager.spawn_handle().spawn( - "authority-discovery-worker", - Some("authority-discovery"), - Box::pin(worker.run()), - ); - Some(service) - } else { - None - }; let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen @@ -1039,7 +1056,7 @@ pub fn new_full( dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, - is_collator, + is_parachain_node, approval_voting_config, availability_config: AVAILABILITY_CONFIG, candidate_validation_config, @@ -1332,11 +1349,6 @@ pub fn new_chain_ops( /// /// The actual "flavor", aka if it will use `Polkadot`, `Rococo` or `Kusama` is determined based on /// [`IdentifyVariant`] using the chain spec. -/// -/// `overseer_enable_anyways` always enables the overseer, based on the provided -/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or -/// disputes-aware) is still determined based on the role of the node. Likewise for authority -/// discovery. #[cfg(feature = "full-node")] pub fn build_full( config: Configuration, diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index 29122ddca162..b315d2847c07 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsCollator, Registry}; +use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsParachainNode, Registry}; use polkadot_node_subsystem_types::DefaultSubsystemClient; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_core::traits::SpawnNamed; @@ -108,7 +108,7 @@ where /// Task spawner to be used throughout the overseer and the APIs it provides. pub spawner: Spawner, /// Determines the behavior of the collator. - pub is_collator: IsCollator, + pub is_parachain_node: IsParachainNode, /// Configuration for the approval voting subsystem. pub approval_voting_config: ApprovalVotingConfig, /// Configuration for the availability store subsystem. 
@@ -149,7 +149,7 @@ pub fn prepared_overseer_builder( dispute_req_receiver, registry, spawner, - is_collator, + is_parachain_node, approval_voting_config, availability_config, candidate_validation_config, @@ -266,14 +266,15 @@ where .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ - let side = match is_collator { - IsCollator::Yes(collator_pair) => ProtocolSide::Collator( + let side = match is_parachain_node { + IsParachainNode::Collator(collator_pair) => ProtocolSide::Collator( network_service.local_peer_id(), collator_pair, collation_req_receiver, Metrics::register(registry)?, ), - IsCollator::No => ProtocolSide::Validator { + IsParachainNode::FullNode => ProtocolSide::None, + IsParachainNode::No => ProtocolSide::Validator { keystore: keystore.clone(), eviction_policy: Default::default(), metrics: Metrics::register(registry)?, diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index ed25d28d2925..932e95a7cab6 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -28,7 +28,7 @@ use polkadot_overseer::Handle; use polkadot_primitives::{Balance, CollatorPair, HeadData, Id as ParaId, ValidationCode}; use polkadot_runtime_common::BlockHashCount; use polkadot_runtime_parachains::paras::{ParaGenesisArgs, ParaKind}; -use polkadot_service::{Error, FullClient, IsCollator, NewFull, PrometheusConfig}; +use polkadot_service::{Error, FullClient, IsParachainNode, NewFull, PrometheusConfig}; use polkadot_test_runtime::{ ParasCall, ParasSudoWrapperCall, Runtime, SignedExtra, SignedPayload, SudoCall, UncheckedExtrinsic, VERSION, @@ -71,7 +71,7 @@ pub use polkadot_service::{FullBackend, GetLastTimestamp}; #[sc_tracing::logging::prefix_logs_with(config.network.node_name.as_str())] pub fn new_full( config: Configuration, - is_collator: IsCollator, + is_parachain_node: IsParachainNode, workers_path: Option, ) -> Result { let workers_path = Some(workers_path.unwrap_or_else(get_relative_workers_path_for_test)); @@ -79,14 +79,13 @@ pub fn new_full( polkadot_service::new_full( config, polkadot_service::NewFullParams { - is_collator, + is_parachain_node, grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, workers_path, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, @@ -207,7 +206,7 @@ pub fn run_validator_node( ) -> PolkadotTestNode { let multiaddr = config.network.listen_addresses[0].clone(); let NewFull { task_manager, client, network, rpc_handlers, overseer_handle, .. } = - new_full(config, IsCollator::No, worker_program_path) + new_full(config, IsParachainNode::No, worker_program_path) .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); @@ -239,7 +238,7 @@ pub fn run_collator_node( let config = node_config(storage_update_func, tokio_handle, key, boot_nodes, false); let multiaddr = config.network.listen_addresses[0].clone(); let NewFull { task_manager, client, network, rpc_handlers, overseer_handle, .. 
} = - new_full(config, IsCollator::Yes(collator_pair), None) + new_full(config, IsParachainNode::Collator(collator_pair), None) .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs index 8d8a13767178..f9efa9c68ad3 100644 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -21,6 +21,7 @@ use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; +use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_adder_collator::Collator; @@ -57,10 +58,15 @@ fn main() -> Result<()> { let collator = Collator::new(); config.disable_beefy = true; + // Zombienet is spawning all collators currently with the same CLI, this means it + // sets `--validator` and this is wrong here. + config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { - is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), + is_parachain_node: polkadot_service::IsParachainNode::Collator( + collator.collator_key(), + ), grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, @@ -70,7 +76,6 @@ fn main() -> Result<()> { workers_path: None, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, diff --git a/parachain/test-parachains/undying/collator/src/main.rs b/parachain/test-parachains/undying/collator/src/main.rs index da8205ba1893..8eadc233ae78 100644 --- a/parachain/test-parachains/undying/collator/src/main.rs +++ b/parachain/test-parachains/undying/collator/src/main.rs @@ -21,6 +21,7 @@ use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; +use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_undying_collator::Collator; @@ -57,10 +58,15 @@ fn main() -> Result<()> { let collator = Collator::new(cli.run.pov_size, cli.run.pvf_complexity); config.disable_beefy = true; + // Zombienet is spawning all collators currently with the same CLI, this means it + // sets `--validator` and this is wrong here. 
+ config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { - is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), + is_parachain_node: polkadot_service::IsParachainNode::Collator( + collator.collator_key(), + ), grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, @@ -70,7 +76,6 @@ fn main() -> Result<()> { workers_path: None, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index d4ad8619f16e..0631b280aadd 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -1244,28 +1244,27 @@ impl Pallet { ) -> DispatchResult { let mut pending_configs = >::get(); - // 1. pending_configs = [] - // No pending configuration changes. + // 1. pending_configs = [] No pending configuration changes. // // That means we should use the active config as the base configuration. We will insert // the new pending configuration as (cur+2, new_config) into the list. // - // 2. pending_configs = [(cur+2, X)] - // There is a configuration that is pending for the scheduled session. + // 2. pending_configs = [(cur+2, X)] There is a configuration that is pending for the + // scheduled session. // // We will use X as the base configuration. We can update the pending configuration X // directly. // - // 3. pending_configs = [(cur+1, X)] - // There is a pending configuration scheduled and it will be applied in the next session. + // 3. pending_configs = [(cur+1, X)] There is a pending configuration scheduled and it will + // be applied in the next session. // // We will use X as the base configuration. We need to schedule a new configuration // change for the `scheduled_session` and use X as the base for the new configuration. // - // 4. pending_configs = [(cur+1, X), (cur+2, Y)] - // There is a pending configuration change in the next session and for the scheduled - // session. Due to case №3, we can be sure that Y is based on top of X. This means we - // can use Y as the base configuration and update Y directly. + // 4. pending_configs = [(cur+1, X), (cur+2, Y)] There is a pending configuration change in + // the next session and for the scheduled session. Due to case №3, we can be sure that Y + // is based on top of X. This means we can use Y as the base configuration and update Y + // directly. // // There cannot be (cur, X) because those are applied in the session change handler for the // current session. diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index a40a3422a669..da0b972bc92c 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -977,8 +977,8 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { /// 2. If exceeded: /// 1. Check validity of all dispute statements sequentially /// 2. If not exceeded: -/// 1. If weight is exceeded by locals, pick the older ones (lower indices) -/// until the weight limit is reached. +/// 1. If weight is exceeded by locals, pick the older ones (lower indices) until the weight limit +/// is reached. /// /// Returns the consumed weight amount, that is guaranteed to be less than the provided /// `max_consumable_weight`. 
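The paras_inherent doc comment reflowed just above describes how local dispute statements are filtered against a weight budget: older statements (lower indices) are kept first until the limit would be exceeded, and the consumed weight is returned. A minimal sketch of that selection rule in plain Rust, assuming a simplified `u64` weight and a hypothetical `select_until_weight_limit` helper rather than the pallet's actual implementation:

/// Keep items oldest-first (lower indices first) until adding the next one would
/// exceed `max_consumable_weight`; return the kept items and the weight consumed.
fn select_until_weight_limit<T>(items: Vec<(T, u64)>, max_consumable_weight: u64) -> (Vec<T>, u64) {
    let mut consumed: u64 = 0;
    let mut picked = Vec::new();
    for (item, weight) in items {
        // Stop at the first item that would push the running total over the limit.
        let Some(total) = consumed.checked_add(weight) else { break };
        if total > max_consumable_weight {
            break;
        }
        consumed = total;
        picked.push(item);
    }
    // `consumed` never exceeds `max_consumable_weight` at this point.
    (picked, consumed)
}

fn main() {
    // Hypothetical statements, oldest first, each paired with a precomputed weight.
    let statements = vec![("a", 30u64), ("b", 40), ("c", 50)];
    let (picked, consumed) = select_until_weight_limit(statements, 80);
    assert_eq!(picked, vec!["a", "b"]);
    assert_eq!(consumed, 70);
}

Because the items are already ordered oldest-first, processing them in index order makes the "pick the older ones" rule fall out naturally; no sorting is needed.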
diff --git a/runtime/parachains/src/runtime_api_impl/mod.rs b/runtime/parachains/src/runtime_api_impl/mod.rs index e22ef825858d..e066ad825a33 100644 --- a/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/runtime/parachains/src/runtime_api_impl/mod.rs @@ -23,8 +23,7 @@ //! will contain methods from `vstaging`. //! The promotion consists of the following steps: //! 1. Bump the version of the stable module (e.g. `v2` becomes `v3`) -//! 2. Move methods from `vstaging` to `v3`. The new stable version should include -//! all methods from `vstaging` tagged with the new version number (e.g. all -//! `v3` methods). +//! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from +//! `vstaging` tagged with the new version number (e.g. all `v3` methods). pub mod v5; pub mod vstaging; From 1bddab1660310bc450af0a741e827e795ae3e320 Mon Sep 17 00:00:00 2001 From: alexd10s Date: Tue, 15 Aug 2023 15:17:15 +0200 Subject: [PATCH 17/27] Change the Config of the MaxRococoNum Slot from a Constant to a Storage function (#7217) * set MaxPermanentSlots and MaxTemporarySlots with a extrinsic instead of a constant * delete the MaxPermanentSlots and MaxTemporarySlots constants from config on Rococo and Westend * migration code for assigned slots * remove getters * little refactor * set values in the GenesisConfig * refactor in the migration, adding it in the rococo runtime * refactor: fmt * Minor fix * pre_upgrade check * add migration to mod v1 * Logs following Substrate#12873 * fix: current storage version set to 1 * use enact when try-runtime * Vec seems to be missing * feature gate import * fix as per #13993 * address comments Co-authored-by: Oliver Tale-Yazdi * address comments Co-authored-by: Oliver Tale-Yazdi * benchmarking for assign_perm_parachain_slot extrinsic * benchmark all the extrinsics of the pallet * cargo fmt for assigned slots * migration added for westend * licence in benchmarking file * BuildGenesisConfig * assigned_slots default in genesis * cargo fmt * assigned_slots fix tests config * cargo fmt * fix benchmarking compile error * fix benchmarking imports * benchmark worst case scenario for validation code and head data * add assigned_slots in frame_benchmarking on Rococo and Westend * modify values for para_id in benchmarking * delete the assigned_slots in westend frame_benchmarking * fix benchmarkings and add it to westend * cargo fmt * ".git/.scripts/commands/bench/bench.sh" --subcommand=runtime --runtime=rococo --target_dir=polkadot --pallet=runtime_common::assigned_slots * ".git/.scripts/commands/bench/bench.sh" --subcommand=runtime --runtime=westend --target_dir=polkadot --pallet=runtime_common::assigned_slots * use generated weights in assigned_slots pallet * small changes in set_max_permanent_slots and set_max_temporary_slots * revert last commit * address some comments * wrap migration with VersionCheckedMigrateToV1 * add experimental feature in pallet, and assers in post_upgrade migration * clean warnings * clean unnecesary experimental flag * small typo in comments * cargo fmt * small comments fixes --------- Co-authored-by: al3mart <11448715+al3mart@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> --- node/service/src/chain_spec.rs | 4 + runtime/common/Cargo.toml | 3 + .../common/src/assigned_slots/benchmarking.rs | 160 ++++++++++++++ .../common/src/assigned_slots/migration.rs | 77 +++++++ .../mod.rs} | 202 +++++++++++++++--- runtime/rococo/Cargo.toml | 2 +- runtime/rococo/src/lib.rs | 9 +- 
runtime/rococo/src/weights/mod.rs | 1 + .../weights/runtime_common_assigned_slots.rs | 151 +++++++++++++ runtime/westend/Cargo.toml | 2 +- runtime/westend/src/lib.rs | 9 +- runtime/westend/src/weights/mod.rs | 1 + .../weights/runtime_common_assigned_slots.rs | 151 +++++++++++++ 13 files changed, 727 insertions(+), 45 deletions(-) create mode 100644 runtime/common/src/assigned_slots/benchmarking.rs create mode 100644 runtime/common/src/assigned_slots/migration.rs rename runtime/common/src/{assigned_slots.rs => assigned_slots/mod.rs} (88%) create mode 100644 runtime/rococo/src/weights/runtime_common_assigned_slots.rs create mode 100644 runtime/westend/src/weights/runtime_common_assigned_slots.rs diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index 7aabfa6e9185..87a8650c2ed6 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -515,6 +515,7 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Runtim }, xcm_pallet: Default::default(), nomination_pools: Default::default(), + assigned_slots: Default::default(), } } @@ -1023,6 +1024,7 @@ fn rococo_staging_testnet_config_genesis( }, xcm_pallet: Default::default(), nis_counterpart_balances: Default::default(), + assigned_slots: Default::default(), } } @@ -1484,6 +1486,7 @@ pub fn westend_testnet_genesis( }, xcm_pallet: Default::default(), nomination_pools: Default::default(), + assigned_slots: Default::default(), } } @@ -1573,6 +1576,7 @@ pub fn rococo_testnet_genesis( }, xcm_pallet: Default::default(), nis_counterpart_balances: Default::default(), + assigned_slots: Default::default(), } } diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index c9812d806733..dda7c2e92368 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -64,6 +64,9 @@ test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../pri [features] default = ["std"] +experimental = [ + "frame-support/experimental" +] no_std = [] std = [ "bitvec/std", diff --git a/runtime/common/src/assigned_slots/benchmarking.rs b/runtime/common/src/assigned_slots/benchmarking.rs new file mode 100644 index 000000000000..61638fe6cabf --- /dev/null +++ b/runtime/common/src/assigned_slots/benchmarking.rs @@ -0,0 +1,160 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Benchmarking for assigned_slots pallet + +#![cfg(feature = "runtime-benchmarks")] +use super::*; + +use frame_benchmarking::v2::*; +use frame_support::assert_ok; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use primitives::Id as ParaId; +use sp_runtime::traits::Bounded; + +type CurrencyOf = <::Leaser as Leaser>>::Currency; +type BalanceOf = <<::Leaser as Leaser>>::Currency as Currency< + ::AccountId, +>>::Balance; +#[benchmarks(where T: Config)] +mod benchmarks { + use super::*; + + use crate::assigned_slots::Pallet as AssignedSlots; + + fn register_parachain(para_id: ParaId) { + let who: T::AccountId = whitelisted_caller(); + let worst_validation_code = T::Registrar::worst_validation_code(); + let worst_head_data = T::Registrar::worst_head_data(); + + CurrencyOf::::make_free_balance_be(&who, BalanceOf::::max_value()); + + assert_ok!(T::Registrar::register( + who, + para_id, + worst_head_data, + worst_validation_code.clone() + )); + assert_ok!(paras::Pallet::::add_trusted_validation_code( + frame_system::Origin::::Root.into(), + worst_validation_code, + )); + T::Registrar::execute_pending_transitions(); + } + + #[benchmark] + fn assign_perm_parachain_slot() { + let para_id = ParaId::from(1_u32); + let caller = RawOrigin::Root; + + let _ = + AssignedSlots::::set_max_permanent_slots(frame_system::Origin::::Root.into(), 10); + register_parachain::(para_id); + + let counter = PermanentSlotCount::::get(); + let current_lease_period: BlockNumberFor = + T::Leaser::lease_period_index(frame_system::Pallet::::block_number()) + .and_then(|x| Some(x.0)) + .unwrap(); + #[extrinsic_call] + assign_perm_parachain_slot(caller, para_id); + + assert_eq!( + PermanentSlots::::get(para_id), + Some(( + current_lease_period, + LeasePeriodOf::::from(T::PermanentSlotLeasePeriodLength::get()), + )) + ); + assert_eq!(PermanentSlotCount::::get(), counter + 1); + } + + #[benchmark] + fn assign_temp_parachain_slot() { + let para_id = ParaId::from(2_u32); + let caller = RawOrigin::Root; + + let _ = + AssignedSlots::::set_max_temporary_slots(frame_system::Origin::::Root.into(), 10); + register_parachain::(para_id); + + let current_lease_period: BlockNumberFor = + T::Leaser::lease_period_index(frame_system::Pallet::::block_number()) + .and_then(|x| Some(x.0)) + .unwrap(); + + let counter = TemporarySlotCount::::get(); + #[extrinsic_call] + assign_temp_parachain_slot(caller, para_id, SlotLeasePeriodStart::Current); + + let tmp = ParachainTemporarySlot { + manager: whitelisted_caller(), + period_begin: current_lease_period, + period_count: LeasePeriodOf::::from(T::TemporarySlotLeasePeriodLength::get()), + last_lease: Some(BlockNumberFor::::zero()), + lease_count: 1, + }; + assert_eq!(TemporarySlots::::get(para_id), Some(tmp)); + assert_eq!(TemporarySlotCount::::get(), counter + 1); + } + + #[benchmark] + fn unassign_parachain_slot() { + let para_id = ParaId::from(3_u32); + let caller = RawOrigin::Root; + + let _ = + AssignedSlots::::set_max_temporary_slots(frame_system::Origin::::Root.into(), 10); + register_parachain::(para_id); + + let _ = AssignedSlots::::assign_temp_parachain_slot( + caller.clone().into(), + para_id, + SlotLeasePeriodStart::Current, + ); + + let counter = TemporarySlotCount::::get(); + #[extrinsic_call] + unassign_parachain_slot(caller, para_id); + + assert_eq!(TemporarySlots::::get(para_id), None); + assert_eq!(TemporarySlotCount::::get(), counter - 1); + } + + #[benchmark] + fn set_max_permanent_slots() { + let caller = RawOrigin::Root; + #[extrinsic_call] + 
set_max_permanent_slots(caller, u32::MAX); + + assert_eq!(MaxPermanentSlots::::get(), u32::MAX); + } + + #[benchmark] + fn set_max_temporary_slots() { + let caller = RawOrigin::Root; + #[extrinsic_call] + set_max_temporary_slots(caller, u32::MAX); + + assert_eq!(MaxTemporarySlots::::get(), u32::MAX); + } + + impl_benchmark_test_suite!( + AssignedSlots, + crate::assigned_slots::tests::new_test_ext(), + crate::assigned_slots::tests::Test, + ); +} diff --git a/runtime/common/src/assigned_slots/migration.rs b/runtime/common/src/assigned_slots/migration.rs new file mode 100644 index 000000000000..884d67222d28 --- /dev/null +++ b/runtime/common/src/assigned_slots/migration.rs @@ -0,0 +1,77 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::{Config, MaxPermanentSlots, MaxTemporarySlots, Pallet, LOG_TARGET}; +use frame_support::{ + dispatch::GetStorageVersion, + traits::{Get, OnRuntimeUpgrade}, +}; + +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +pub mod v1 { + + use super::*; + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let onchain_version = Pallet::::on_chain_storage_version(); + ensure!(onchain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted"); + Ok(Default::default()) + } + + fn on_runtime_upgrade() -> frame_support::weights::Weight { + let onchain_version = Pallet::::on_chain_storage_version(); + if onchain_version < 1 { + const MAX_PERMANENT_SLOTS: u32 = 100; + const MAX_TEMPORARY_SLOTS: u32 = 100; + + >::put(MAX_PERMANENT_SLOTS); + >::put(MAX_TEMPORARY_SLOTS); + // Return the weight consumed by the migration. + T::DbWeight::get().reads_writes(1, 3) + } else { + log::info!(target: LOG_TARGET, "MigrateToV1 should be removed"); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let onchain_version = Pallet::::on_chain_storage_version(); + ensure!(onchain_version == 1, "assigned_slots::MigrateToV1 needs to be run"); + assert_eq!(>::get(), 100); + assert_eq!(>::get(), 100); + Ok(()) + } + } + + /// [`VersionUncheckedMigrateToV1`] wrapped in a + /// [`frame_support::migrations::VersionedRuntimeUpgrade`], ensuring the migration is only + /// performed when on-chain version is 0. 
+ #[cfg(feature = "experimental")] + pub type VersionCheckedMigrateToV1 = frame_support::migrations::VersionedRuntimeUpgrade< + 0, + 1, + MigrateToV1, + Pallet, + ::DbWeight, + >; +} diff --git a/runtime/common/src/assigned_slots.rs b/runtime/common/src/assigned_slots/mod.rs similarity index 88% rename from runtime/common/src/assigned_slots.rs rename to runtime/common/src/assigned_slots/mod.rs index b3c1381c9ec9..4763c3e3f0b4 100644 --- a/runtime/common/src/assigned_slots.rs +++ b/runtime/common/src/assigned_slots/mod.rs @@ -23,10 +23,12 @@ //! This pallet should not be used on a production relay chain, //! only on a test relay chain (e.g. Rococo). +pub mod benchmarking; +pub mod migration; + use crate::{ - slots::{self, Pallet as Slots, WeightInfo}, + slots::{self, Pallet as Slots, WeightInfo as SlotsWeightInfo}, traits::{LeaseError, Leaser, Registrar}, - MAXIMUM_BLOCK_WEIGHT, }; use frame_support::{pallet_prelude::*, traits::Currency}; use frame_system::pallet_prelude::*; @@ -41,6 +43,8 @@ use scale_info::TypeInfo; use sp_runtime::traits::{One, Saturating, Zero}; use sp_std::prelude::*; +const LOG_TARGET: &str = "runtime::assigned_slots"; + /// Lease period an assigned slot should start from (current, or next one). #[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum SlotLeasePeriodStart { @@ -67,6 +71,33 @@ pub struct ParachainTemporarySlot { pub lease_count: u32, } +pub trait WeightInfo { + fn assign_perm_parachain_slot() -> Weight; + fn assign_temp_parachain_slot() -> Weight; + fn unassign_parachain_slot() -> Weight; + fn set_max_permanent_slots() -> Weight; + fn set_max_temporary_slots() -> Weight; +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn assign_perm_parachain_slot() -> Weight { + Weight::zero() + } + fn assign_temp_parachain_slot() -> Weight { + Weight::zero() + } + fn unassign_parachain_slot() -> Weight { + Weight::zero() + } + fn set_max_permanent_slots() -> Weight { + Weight::zero() + } + fn set_max_temporary_slots() -> Weight { + Weight::zero() + } +} + type BalanceOf = <<::Leaser as Leaser>>::Currency as Currency< ::AccountId, >>::Balance; @@ -76,7 +107,11 @@ type LeasePeriodOf = <::Leaser as Leaser>>::Le pub mod pallet { use super::*; + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -103,17 +138,12 @@ pub mod pallet { #[pallet::constant] type TemporarySlotLeasePeriodLength: Get; - /// The max number of permanent slots that can be assigned. - #[pallet::constant] - type MaxPermanentSlots: Get; - - /// The max number of temporary slots that can be assigned. - #[pallet::constant] - type MaxTemporarySlots: Get; - /// The max number of temporary slots to be scheduled per lease periods. #[pallet::constant] type MaxTemporarySlotPerLeasePeriod: Get; + + /// Weight Information for the Extrinsics in the Pallet + type WeightInfo: WeightInfo; } /// Assigned permanent slots, with their start lease period, and duration. @@ -148,13 +178,41 @@ pub mod pallet { #[pallet::getter(fn active_temporary_slot_count)] pub type ActiveTemporarySlotCount = StorageValue<_, u32, ValueQuery>; + /// The max number of temporary slots that can be assigned. + #[pallet::storage] + pub type MaxTemporarySlots = StorageValue<_, u32, ValueQuery>; + + /// The max number of permanent slots that can be assigned. 
+ #[pallet::storage] + pub type MaxPermanentSlots = StorageValue<_, u32, ValueQuery>; + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + pub max_temporary_slots: u32, + pub max_permanent_slots: u32, + pub _config: PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + >::put(&self.max_permanent_slots); + >::put(&self.max_temporary_slots); + } + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A para was assigned a permanent parachain slot + /// A parachain was assigned a permanent parachain slot PermanentSlotAssigned(ParaId), - /// A para was assigned a temporary parachain slot + /// A parachain was assigned a temporary parachain slot TemporarySlotAssigned(ParaId), + /// The maximum number of permanent slots has been changed + MaxPermanentSlotsChanged { slots: u32 }, + /// The maximum number of temporary slots has been changed + MaxTemporarySlotsChanged { slots: u32 }, } #[pallet::error] @@ -173,9 +231,9 @@ pub mod pallet { SlotNotAssigned, /// An ongoing lease already exists. OngoingLeaseExists, - // Maximum number of permanent slots exceeded + // The maximum number of permanent slots exceeded MaxPermanentSlotsExceeded, - // Maximum number of temporary slots exceeded + // The maximum number of temporary slots exceeded MaxTemporarySlotsExceeded, } @@ -196,16 +254,15 @@ pub mod pallet { #[pallet::call] impl Pallet { - // TODO: Benchmark this /// Assign a permanent parachain slot and immediately create a lease for it. #[pallet::call_index(0)] - #[pallet::weight(((MAXIMUM_BLOCK_WEIGHT / 10) as Weight, DispatchClass::Operational))] + #[pallet::weight((::WeightInfo::assign_perm_parachain_slot(), DispatchClass::Operational))] pub fn assign_perm_parachain_slot(origin: OriginFor, id: ParaId) -> DispatchResult { T::AssignSlotOrigin::ensure_origin(origin)?; let manager = T::Registrar::manager_of(id).ok_or(Error::::ParaDoesntExist)?; - ensure!(T::Registrar::is_parathread(id), Error::::NotParathread,); + ensure!(T::Registrar::is_parathread(id), Error::::NotParathread); ensure!( !Self::has_permanent_slot(id) && !Self::has_temporary_slot(id), @@ -227,7 +284,7 @@ pub mod pallet { ); ensure!( - PermanentSlotCount::::get() < T::MaxPermanentSlots::get(), + PermanentSlotCount::::get() < MaxPermanentSlots::::get(), Error::::MaxPermanentSlotsExceeded ); @@ -253,12 +310,11 @@ pub mod pallet { Ok(()) } - // TODO: Benchmark this /// Assign a temporary parachain slot. The function tries to create a lease for it /// immediately if `SlotLeasePeriodStart::Current` is specified, and if the number /// of currently active temporary slots is below `MaxTemporarySlotPerLeasePeriod`. #[pallet::call_index(1)] - #[pallet::weight(((MAXIMUM_BLOCK_WEIGHT / 10) as Weight, DispatchClass::Operational))] + #[pallet::weight((::WeightInfo::assign_temp_parachain_slot(), DispatchClass::Operational))] pub fn assign_temp_parachain_slot( origin: OriginFor, id: ParaId, @@ -290,7 +346,7 @@ pub mod pallet { ); ensure!( - TemporarySlotCount::::get() < T::MaxTemporarySlots::get(), + TemporarySlotCount::::get() < MaxTemporarySlots::::get(), Error::::MaxTemporarySlotsExceeded ); @@ -324,9 +380,12 @@ pub mod pallet { // Treat failed lease creation as warning .. slot will be allocated a lease // in a subsequent lease period by the `allocate_temporary_slot_leases` // function. 
- log::warn!(target: "assigned_slots", + log::warn!( + target: LOG_TARGET, "Failed to allocate a temp slot for para {:?} at period {:?}: {:?}", - id, current_lease_period, err + id, + current_lease_period, + err ); }, } @@ -340,10 +399,9 @@ pub mod pallet { Ok(()) } - // TODO: Benchmark this /// Unassign a permanent or temporary parachain slot #[pallet::call_index(2)] - #[pallet::weight(((MAXIMUM_BLOCK_WEIGHT / 10) as Weight, DispatchClass::Operational))] + #[pallet::weight((::WeightInfo::unassign_parachain_slot(), DispatchClass::Operational))] pub fn unassign_parachain_slot(origin: OriginFor, id: ParaId) -> DispatchResult { T::AssignSlotOrigin::ensure_origin(origin.clone())?; @@ -377,15 +435,42 @@ pub mod pallet { // Treat failed downgrade as warning .. slot lease has been cleared, // so the parachain will be downgraded anyway by the slots pallet // at the end of the lease period . - log::warn!(target: "assigned_slots", + log::warn!( + target: LOG_TARGET, "Failed to downgrade parachain {:?} at period {:?}: {:?}", - id, Self::current_lease_period_index(), err + id, + Self::current_lease_period_index(), + err ); } } Ok(()) } + + /// Sets the storage value [`MaxPermanentSlots`]. + #[pallet::call_index(3)] + #[pallet::weight((::WeightInfo::set_max_permanent_slots(), DispatchClass::Operational))] + pub fn set_max_permanent_slots(origin: OriginFor, slots: u32) -> DispatchResult { + ensure_root(origin)?; + + >::put(slots); + + Self::deposit_event(Event::::MaxPermanentSlotsChanged { slots }); + Ok(()) + } + + /// Sets the storage value [`MaxTemporarySlots`]. + #[pallet::call_index(4)] + #[pallet::weight((::WeightInfo::set_max_temporary_slots(), DispatchClass::Operational))] + pub fn set_max_temporary_slots(origin: OriginFor, slots: u32) -> DispatchResult { + ensure_root(origin)?; + + >::put(slots); + + Self::deposit_event(Event::::MaxTemporarySlotsChanged { slots }); + Ok(()) + } } } @@ -530,9 +615,11 @@ impl Pallet { // Note: leases that have ended in previous lease period, should have been cleaned in slots // pallet. if let Err(err) = Self::allocate_temporary_slot_leases(lease_period_index) { - log::error!(target: "assigned_slots", + log::error!( + target: LOG_TARGET, "Allocating slots failed for lease period {:?}, with: {:?}", - lease_period_index, err + lease_period_index, + err ); } ::WeightInfo::force_lease() * @@ -673,8 +760,6 @@ mod tests { parameter_types! 
{ pub const PermanentSlotLeasePeriodLength: u32 = 3; pub const TemporarySlotLeasePeriodLength: u32 = 2; - pub const MaxPermanentSlots: u32 = 2; - pub const MaxTemporarySlots: u32 = 6; pub const MaxTemporarySlotPerLeasePeriod: u32 = 2; } @@ -684,9 +769,8 @@ mod tests { type Leaser = Slots; type PermanentSlotLeasePeriodLength = PermanentSlotLeasePeriodLength; type TemporarySlotLeasePeriodLength = TemporarySlotLeasePeriodLength; - type MaxPermanentSlots = MaxPermanentSlots; - type MaxTemporarySlots = MaxTemporarySlots; type MaxTemporarySlotPerLeasePeriod = MaxTemporarySlotPerLeasePeriod; + type WeightInfo = crate::assigned_slots::TestWeightInfo; } // This function basically just builds a genesis storage key/value store according to @@ -698,6 +782,15 @@ mod tests { } .assimilate_storage(&mut t) .unwrap(); + + crate::assigned_slots::GenesisConfig:: { + max_temporary_slots: 6, + max_permanent_slots: 2, + _config: Default::default(), + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() } @@ -1324,4 +1417,47 @@ mod tests { assert_eq!(Slots::already_leased(ParaId::from(1_u32), 0, 1), false); }); } + #[test] + fn set_max_permanent_slots_fails_for_no_root_origin() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_noop!( + AssignedSlots::set_max_permanent_slots(RuntimeOrigin::signed(1), 5), + BadOrigin + ); + }); + } + #[test] + fn set_max_permanent_slots_succeeds() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_eq!(MaxPermanentSlots::::get(), 2); + assert_ok!(AssignedSlots::set_max_permanent_slots(RuntimeOrigin::root(), 10),); + assert_eq!(MaxPermanentSlots::::get(), 10); + }); + } + + #[test] + fn set_max_temporary_slots_fails_for_no_root_origin() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_noop!( + AssignedSlots::set_max_temporary_slots(RuntimeOrigin::signed(1), 5), + BadOrigin + ); + }); + } + #[test] + fn set_max_temporary_slots_succeeds() { + new_test_ext().execute_with(|| { + run_to_block(1); + + assert_eq!(MaxTemporarySlots::::get(), 6); + assert_ok!(AssignedSlots::set_max_temporary_slots(RuntimeOrigin::root(), 12),); + assert_eq!(MaxTemporarySlots::::get(), 12); + }); + } } diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index 41d25d3aa6f6..f1f0d1cbe729 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -83,7 +83,7 @@ frame-try-runtime = { git = "https://github.com/paritytech/substrate", branch = frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } hex-literal = { version = "0.4.1" } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } +runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features=["experimental"] } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } polkadot-parachain = { path = "../../parachain", default-features = false } diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 31b657c3a5fb..d923437a67e5 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1334,8 +1334,6 @@ impl paras_sudo_wrapper::Config for Runtime {} parameter_types! 
{ pub const PermanentSlotLeasePeriodLength: u32 = 365; pub const TemporarySlotLeasePeriodLength: u32 = 5; - pub const MaxPermanentSlots: u32 = 100; - pub const MaxTemporarySlots: u32 = 100; pub const MaxTemporarySlotPerLeasePeriod: u32 = 5; } @@ -1345,9 +1343,8 @@ impl assigned_slots::Config for Runtime { type Leaser = Slots; type PermanentSlotLeasePeriodLength = PermanentSlotLeasePeriodLength; type TemporarySlotLeasePeriodLength = TemporarySlotLeasePeriodLength; - type MaxPermanentSlots = MaxPermanentSlots; - type MaxTemporarySlots = MaxTemporarySlots; type MaxTemporarySlotPerLeasePeriod = MaxTemporarySlotPerLeasePeriod; + type WeightInfo = weights::runtime_common_assigned_slots::WeightInfo; } impl validator_manager::Config for Runtime { @@ -1471,7 +1468,7 @@ construct_runtime! { MmrLeaf: pallet_beefy_mmr::{Pallet, Storage} = 242, ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 250, - AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event} = 251, + AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event, Config} = 251, // Validator Manager pallet. ValidatorManager: validator_manager::{Pallet, Call, Storage, Event} = 252, @@ -1526,6 +1523,7 @@ pub mod migrations { pallet_society::migrations::VersionCheckedMigrateToV2, pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, + assigned_slots::migration::v1::VersionCheckedMigrateToV1, ); } @@ -1572,6 +1570,7 @@ mod benches { // Polkadot // NOTE: Make sure to prefix these with `runtime_common::` so // the that path resolves correctly in the generated file. + [runtime_common::assigned_slots, AssignedSlots] [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] [runtime_common::claims, Claims] diff --git a/runtime/rococo/src/weights/mod.rs b/runtime/rococo/src/weights/mod.rs index 5bc39330e28e..75acfe9a5d64 100644 --- a/runtime/rococo/src/weights/mod.rs +++ b/runtime/rococo/src/weights/mod.rs @@ -42,6 +42,7 @@ pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; pub mod pallet_xcm; +pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_claims; pub mod runtime_common_crowdloan; diff --git a/runtime/rococo/src/weights/runtime_common_assigned_slots.rs b/runtime/rococo/src/weights/runtime_common_assigned_slots.rs new file mode 100644 index 000000000000..a6beeded4286 --- /dev/null +++ b/runtime/rococo/src/weights/runtime_common_assigned_slots.rs @@ -0,0 +1,151 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_common::assigned_slots` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `runner-ynta1nyy-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_common::assigned_slots +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::assigned_slots`. +pub struct WeightInfo(PhantomData); +impl runtime_common::assigned_slots::WeightInfo for WeightInfo { + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:1) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:0) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::PermanentSlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::MaxPermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::MaxPermanentSlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn assign_perm_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `673` + // Estimated: `4138` + // Minimum execution time: 84_646_000 picoseconds. 
+ Weight::from_parts(91_791_000, 0) + .saturating_add(Weight::from_parts(0, 4138)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::TemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::MaxTemporarySlots` (r:1 w:0) + /// Proof: `AssignedSlots::MaxTemporarySlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::ActiveTemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::ActiveTemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn assign_temp_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `673` + // Estimated: `4138` + // Minimum execution time: 68_091_000 picoseconds. + Weight::from_parts(77_310_000, 0) + .saturating_add(Weight::from_parts(0, 4138)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::TemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn unassign_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `823` + // Estimated: `4288` + // Minimum execution time: 38_081_000 picoseconds. 
+ Weight::from_parts(40_987_000, 0) + .saturating_add(Weight::from_parts(0, 4288)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AssignedSlots::MaxPermanentSlots` (r:0 w:1) + /// Proof: `AssignedSlots::MaxPermanentSlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_max_permanent_slots() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_182_000 picoseconds. + Weight::from_parts(7_437_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssignedSlots::MaxTemporarySlots` (r:0 w:1) + /// Proof: `AssignedSlots::MaxTemporarySlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_max_temporary_slots() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_153_000 picoseconds. + Weight::from_parts(7_456_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index 4773176e1762..e665a08b1ed1 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -89,7 +89,7 @@ pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate" pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } hex-literal = { version = "0.4.1", optional = true } -runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } +runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false, features=["experimental"] } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } polkadot-parachain = { path = "../../parachain", default-features = false } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 9bb5a6db613d..9c322d6b8436 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1007,8 +1007,6 @@ impl paras_sudo_wrapper::Config for Runtime {} parameter_types! { pub const PermanentSlotLeasePeriodLength: u32 = 26; pub const TemporarySlotLeasePeriodLength: u32 = 1; - pub const MaxPermanentSlots: u32 = 5; - pub const MaxTemporarySlots: u32 = 20; pub const MaxTemporarySlotPerLeasePeriod: u32 = 5; } @@ -1018,9 +1016,8 @@ impl assigned_slots::Config for Runtime { type Leaser = Slots; type PermanentSlotLeasePeriodLength = PermanentSlotLeasePeriodLength; type TemporarySlotLeasePeriodLength = TemporarySlotLeasePeriodLength; - type MaxPermanentSlots = MaxPermanentSlots; - type MaxTemporarySlots = MaxTemporarySlots; type MaxTemporarySlotPerLeasePeriod = MaxTemporarySlotPerLeasePeriod; + type WeightInfo = weights::runtime_common_assigned_slots::WeightInfo<Runtime>; } impl parachains_disputes::Config for Runtime { @@ -1231,7 +1228,7 @@ construct_runtime!
{ ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 62, Auctions: auctions::{Pallet, Call, Storage, Event<T>} = 63, Crowdloan: crowdloan::{Pallet, Call, Storage, Event<T>} = 64, - AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event<T>} = 65, + AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event<T>, Config<T>} = 65, // Pallet for sending XCM. XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config} = 99, @@ -1285,6 +1282,7 @@ pub mod migrations { pub type Unreleased = ( pallet_im_online::migration::v1::Migration<Runtime>, parachains_configuration::migration::v7::MigrateToV7<Runtime>, + assigned_slots::migration::v1::VersionCheckedMigrateToV1<Runtime>, ); } @@ -1309,6 +1307,7 @@ mod benches { // Polkadot // NOTE: Make sure to prefix these with `runtime_common::` so // the that path resolves correctly in the generated file. + [runtime_common::assigned_slots, AssignedSlots] [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] [runtime_common::paras_registrar, Registrar] diff --git a/runtime/westend/src/weights/mod.rs b/runtime/westend/src/weights/mod.rs index 6341b3da8b69..531de5527de5 100644 --- a/runtime/westend/src/weights/mod.rs +++ b/runtime/westend/src/weights/mod.rs @@ -37,6 +37,7 @@ pub mod pallet_timestamp; pub mod pallet_utility; pub mod pallet_vesting; pub mod pallet_xcm; +pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_crowdloan; pub mod runtime_common_paras_registrar; diff --git a/runtime/westend/src/weights/runtime_common_assigned_slots.rs b/runtime/westend/src/weights/runtime_common_assigned_slots.rs new file mode 100644 index 000000000000..c3f1060a9ac0 --- /dev/null +++ b/runtime/westend/src/weights/runtime_common_assigned_slots.rs @@ -0,0 +1,151 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <https://www.gnu.org/licenses/>. + +//! Autogenerated weights for `runtime_common::assigned_slots` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//!
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_common::assigned_slots +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::assigned_slots`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> runtime_common::assigned_slots::WeightInfo for WeightInfo<T> { + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:1) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:0) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::PermanentSlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::MaxPermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::MaxPermanentSlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn assign_perm_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `640` + // Estimated: `4105` + // Minimum execution time: 74_788_000 picoseconds.
+ Weight::from_parts(79_847_000, 0) + .saturating_add(Weight::from_parts(0, 4105)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::TemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::MaxTemporarySlots` (r:1 w:0) + /// Proof: `AssignedSlots::MaxTemporarySlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::ActiveTemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::ActiveTemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn assign_temp_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `640` + // Estimated: `4105` + // Minimum execution time: 73_324_000 picoseconds. + Weight::from_parts(77_993_000, 0) + .saturating_add(Weight::from_parts(0, 4105)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::TemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn unassign_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `592` + // Estimated: `4057` + // Minimum execution time: 32_796_000 picoseconds. 
+ Weight::from_parts(35_365_000, 0) + .saturating_add(Weight::from_parts(0, 4057)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AssignedSlots::MaxPermanentSlots` (r:0 w:1) + /// Proof: `AssignedSlots::MaxPermanentSlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_max_permanent_slots() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_104_000 picoseconds. + Weight::from_parts(7_358_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssignedSlots::MaxTemporarySlots` (r:0 w:1) + /// Proof: `AssignedSlots::MaxTemporarySlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_max_temporary_slots() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_097_000 picoseconds. + Weight::from_parts(7_429_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} From 964330d6b50179b77204a7810e841f0b27fc47a0 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 15 Aug 2023 19:06:07 +0300 Subject: [PATCH 18/27] Revert "companion for 14754: cli: move no-beefy flag to sc-cli (#7600)" (#7613) * Revert "companion for 14754: cli: move no-beefy flag to sc-cli (#7600)" This reverts commit 8f05479e4bd61341af69f0721e617f01cbad8bb2. * update lockfile for {"substrate"} * fix merge damage --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 372 +++++++++--------- cli/src/cli.rs | 5 + cli/src/command.rs | 11 +- node/service/src/lib.rs | 3 +- node/test/service/src/lib.rs | 2 +- .../adder/collator/src/main.rs | 2 +- .../undying/collator/src/main.rs | 2 +- 7 files changed, 204 insertions(+), 193 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 85b0bf1cfa57..4d37230ea10f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -617,7 +617,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "hash-db", "log", @@ -2446,7 +2446,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", ] @@ -2469,7 +2469,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-support-procedural", @@ -2494,7 +2494,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "Inflector", "array-bytes", @@ -2542,7 +2542,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2570,7 +2570,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -2599,7 +2599,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-recursion", "futures", @@ -2620,7 +2620,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "aquamarine", "bitflags", @@ -2657,7 +2657,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "Inflector", "cfg-expr", @@ -2675,7 +2675,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2687,7 +2687,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "proc-macro2", "quote", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" 
dependencies = [ "frame-benchmarking", "frame-executive", @@ -2724,7 +2724,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -2737,7 +2737,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "cfg-if", "frame-support", @@ -2756,7 +2756,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -2771,7 +2771,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "sp-api", @@ -2780,7 +2780,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "parity-scale-codec", @@ -2962,7 +2962,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "chrono", "frame-election-provider-support", @@ -4829,7 +4829,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "log", @@ -4848,7 +4848,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "anyhow", "jsonrpsee", @@ -5374,7 +5374,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -5405,7 +5405,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -5419,7 +5419,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5443,8 +5443,10 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ + "aquamarine", + "docify", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -5463,7 +5465,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5482,7 +5484,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5497,7 +5499,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -5516,7 +5518,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5540,7 +5542,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5558,7 +5560,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5577,7 +5579,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5594,7 +5596,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5611,7 +5613,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5629,7 +5631,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5652,7 +5654,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5665,7 +5667,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5684,7 +5686,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "docify", "frame-benchmarking", @@ -5703,7 +5705,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5726,7 +5728,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5742,7 +5744,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5762,7 +5764,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5779,7 +5781,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5796,7 +5798,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5815,7 +5817,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5832,7 +5834,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5848,7 +5850,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5864,7 +5866,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -5883,7 +5885,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5903,7 +5905,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5914,7 +5916,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -5931,7 +5933,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5955,7 +5957,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5972,7 +5974,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5987,7 +5989,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6005,7 +6007,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6020,7 +6022,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6039,8 +6041,9 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -6056,7 +6059,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -6077,7 +6080,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6093,7 +6096,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6111,7 +6114,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6134,7 +6137,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6145,7 +6148,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "log", "sp-arithmetic", @@ -6154,7 +6157,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "sp-api", @@ -6163,7 +6166,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6180,7 +6183,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" 
dependencies = [ "frame-benchmarking", "frame-support", @@ -6195,7 +6198,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6213,7 +6216,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6232,7 +6235,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-support", "frame-system", @@ -6248,7 +6251,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6264,7 +6267,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6276,7 +6279,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6293,7 +6296,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6308,7 +6311,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6324,7 +6327,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6339,7 +6342,7 @@ dependencies = [ 
[[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-benchmarking", "frame-support", @@ -9361,7 +9364,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "log", "sp-core", @@ -9372,7 +9375,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "futures", @@ -9400,7 +9403,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "futures-timer", @@ -9423,7 +9426,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -9438,7 +9441,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9457,7 +9460,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9468,7 +9471,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "chrono", @@ -9507,7 +9510,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "fnv", "futures", @@ -9533,7 +9536,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "hash-db", "kvdb", @@ -9559,7 +9562,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "futures", @@ -9584,7 +9587,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "fork-tree", @@ -9620,7 +9623,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "jsonrpsee", @@ -9642,7 +9645,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "async-channel", @@ -9676,7 +9679,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "jsonrpsee", @@ -9695,7 +9698,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9708,7 +9711,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "ahash 0.8.2", "array-bytes", @@ -9749,7 +9752,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "finality-grandpa", "futures", @@ -9769,7 +9772,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "futures", @@ -9792,7 +9795,7 @@ dependencies = [ [[package]] name = "sc-executor" version = 
"0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9814,7 +9817,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9826,7 +9829,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "anyhow", "cfg-if", @@ -9843,7 +9846,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "ansi_term", "futures", @@ -9859,7 +9862,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9873,7 +9876,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "async-channel", @@ -9916,7 +9919,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-channel", "cid", @@ -9936,7 +9939,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "bitflags", @@ -9953,7 +9956,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "ahash 0.8.2", "futures", @@ -9972,7 +9975,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "async-channel", @@ -9993,7 +9996,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "async-channel", @@ -10027,7 +10030,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "futures", @@ -10045,7 +10048,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "bytes", @@ -10079,7 +10082,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10088,7 +10091,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "jsonrpsee", @@ -10119,7 +10122,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10138,7 +10141,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "http", "jsonrpsee", @@ -10153,7 +10156,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "futures", @@ -10174,13 +10177,14 @@ dependencies = [ "sp-runtime", "sp-version", "thiserror", + "tokio", "tokio-stream", ] [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "directories", @@ -10244,7 +10248,7 @@ 
dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "log", "parity-scale-codec", @@ -10255,7 +10259,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "clap 4.2.5", "fs4", @@ -10269,7 +10273,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10288,7 +10292,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "libc", @@ -10307,7 +10311,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "chrono", "futures", @@ -10326,7 +10330,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "ansi_term", "atty", @@ -10355,7 +10359,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10366,7 +10370,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "futures", @@ -10392,7 +10396,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "futures", @@ -10408,7 +10412,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-channel", "futures", @@ -10956,7 +10960,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "hash-db", "log", @@ -10977,7 +10981,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "Inflector", "blake2", @@ -10991,7 +10995,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11004,7 +11008,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "integer-sqrt", "num-traits", @@ -11018,7 +11022,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11031,7 +11035,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "sp-api", "sp-inherents", @@ -11042,7 +11046,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "log", @@ -11060,7 +11064,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "futures", @@ -11075,7 +11079,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11092,7 +11096,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11111,7 +11115,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11130,7 +11134,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "finality-grandpa", "log", @@ -11148,7 +11152,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11160,7 +11164,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11207,7 +11211,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "blake2b_simd", "byteorder", @@ -11220,7 +11224,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "quote", "sp-core-hashing", @@ -11230,7 +11234,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11239,7 +11243,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "proc-macro2", "quote", @@ -11249,7 +11253,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" 
dependencies = [ "environmental", "parity-scale-codec", @@ -11260,7 +11264,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "serde_json", "sp-api", @@ -11271,7 +11275,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11285,7 +11289,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "bytes", "ed25519", @@ -11310,7 +11314,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "lazy_static", "sp-core", @@ -11321,7 +11325,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11333,7 +11337,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "thiserror", "zstd 0.12.3+zstd.1.5.2", @@ -11342,7 +11346,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11353,7 +11357,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11371,7 +11375,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11385,7 +11389,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "sp-api", "sp-core", @@ -11395,7 +11399,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "backtrace", "lazy_static", @@ -11405,7 +11409,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "rustc-hash", "serde", @@ -11415,7 +11419,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "either", "hash256-std-hasher", @@ -11437,7 +11441,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11455,7 +11459,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "Inflector", "proc-macro-crate", @@ -11467,7 +11471,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11482,7 +11486,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11496,7 +11500,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "hash-db", "log", @@ -11517,7 +11521,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "aes-gcm 0.10.2", 
"curve25519-dalek 3.2.0", @@ -11541,12 +11545,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11559,7 +11563,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11572,7 +11576,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "sp-std", @@ -11584,7 +11588,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "sp-api", "sp-runtime", @@ -11593,7 +11597,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11608,7 +11612,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "ahash 0.8.2", "hash-db", @@ -11631,7 +11635,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11648,7 +11652,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -11659,7 +11663,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11672,7 +11676,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11897,12 +11901,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11921,7 +11925,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "hyper", "log", @@ -11933,7 +11937,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "jsonrpsee", @@ -11946,7 +11950,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11963,7 +11967,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "array-bytes", "async-trait", @@ -11989,7 +11993,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "futures", "substrate-test-utils-derive", @@ -11999,7 +12003,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12010,7 +12014,7 @@ dependencies = [ [[package]] name = 
"substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "ansi_term", "build-helper", @@ -12882,7 +12886,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#2060c366fb1402deab188911551a43c3c41b36c0" +source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" dependencies = [ "async-trait", "clap 4.2.5", diff --git a/cli/src/cli.rs b/cli/src/cli.rs index c13340d91a04..696d381962b6 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -99,6 +99,11 @@ pub struct RunCmd { #[arg(long = "grandpa-pause", num_args = 2)] pub grandpa_pause: Vec, + /// Disable the BEEFY gadget + /// (currently enabled by default on Rococo, Wococo and Versi). + #[arg(long)] + pub no_beefy: bool, + /// Add the destination address to the jaeger agent. /// /// Must be valid socket address, of format `IP:Port` diff --git a/cli/src/command.rs b/cli/src/command.rs index dcffa09aaf91..dd76ed558695 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -235,11 +235,15 @@ fn run_node_inner( where F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), { - let mut runner = cli + let runner = cli .create_runner_with_logger_hook::(&cli.run.base, logger_hook) .map_err(Error::from)?; let chain_spec = &runner.config().chain_spec; + // By default, enable BEEFY on test networks. + let enable_beefy = (chain_spec.is_rococo() || chain_spec.is_wococo() || chain_spec.is_versi()) && + !cli.run.no_beefy; + set_default_ss58_version(chain_spec); let grandpa_pause = if cli.run.grandpa_pause.is_empty() { @@ -255,10 +259,6 @@ where info!(" KUSAMA FOUNDATION "); info!("----------------------------"); } - // BEEFY allowed only on test networks. - if !(chain_spec.is_rococo() || chain_spec.is_wococo() || chain_spec.is_versi()) { - runner.config_mut().disable_beefy = true; - } let jaeger_agent = if let Some(ref jaeger_agent) = cli.run.jaeger_agent { Some( @@ -289,6 +289,7 @@ where service::NewFullParams { is_parachain_node: service::IsParachainNode::No, grandpa_pause, + enable_beefy, jaeger_agent, telemetry_worker_handle: None, node_version, diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index dab69473c6ba..3a850c46279a 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -629,6 +629,7 @@ where pub struct NewFullParams { pub is_parachain_node: IsParachainNode, pub grandpa_pause: Option<(u32, u32)>, + pub enable_beefy: bool, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, /// The version of the node. TESTING ONLY: `None` can be passed to skip the node/worker version @@ -719,6 +720,7 @@ pub fn new_full( NewFullParams { is_parachain_node, grandpa_pause, + enable_beefy, jaeger_agent, telemetry_worker_handle, node_version, @@ -752,7 +754,6 @@ pub fn new_full( Some(backoff) }; - let enable_beefy = !config.disable_beefy; // If not on a known test network, warn the user that BEEFY is still experimental. 
if enable_beefy && !config.chain_spec.is_rococo() && diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index 932e95a7cab6..4fc3f82eb4a9 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -81,6 +81,7 @@ pub fn new_full( polkadot_service::NewFullParams { is_parachain_node, grandpa_pause: None, + enable_beefy: true, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, @@ -186,7 +187,6 @@ pub fn node_config( offchain_worker: Default::default(), force_authoring: false, disable_grandpa: false, - disable_beefy: false, dev_key_seed: Some(key_seed), tracing_targets: None, tracing_receiver: Default::default(), diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs index f9efa9c68ad3..de1b37b50dab 100644 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -57,7 +57,6 @@ fn main() -> Result<()> { runner.run_node_until_exit(|mut config| async move { let collator = Collator::new(); - config.disable_beefy = true; // Zombienet is spawning all collators currently with the same CLI, this means it // sets `--validator` and this is wrong here. config.role = Role::Full; @@ -68,6 +67,7 @@ fn main() -> Result<()> { collator.collator_key(), ), grandpa_pause: None, + enable_beefy: false, jaeger_agent: None, telemetry_worker_handle: None, diff --git a/parachain/test-parachains/undying/collator/src/main.rs b/parachain/test-parachains/undying/collator/src/main.rs index 8eadc233ae78..79420dbbc9d5 100644 --- a/parachain/test-parachains/undying/collator/src/main.rs +++ b/parachain/test-parachains/undying/collator/src/main.rs @@ -57,7 +57,6 @@ fn main() -> Result<()> { runner.run_node_until_exit(|mut config| async move { let collator = Collator::new(cli.run.pov_size, cli.run.pvf_complexity); - config.disable_beefy = true; // Zombienet is spawning all collators currently with the same CLI, this means it // sets `--validator` and this is wrong here. config.role = Role::Full; @@ -68,6 +67,7 @@ fn main() -> Result<()> { collator.collator_key(), ), grandpa_pause: None, + enable_beefy: false, jaeger_agent: None, telemetry_worker_handle: None, From a9f56c0d6cefbe0b1e5d529c6436f2eace1540ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Aug 2023 07:37:41 +0000 Subject: [PATCH 19/27] Bump actions/setup-node from 3.7.0 to 3.8.0 (#7622) Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3.7.0 to 3.8.0. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/v3.7.0...v3.8.0) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check-licenses.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 522037b6827c..b61005649eec 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -9,7 +9,7 @@ jobs: steps: - name: Checkout sources uses: actions/checkout@v3 - - uses: actions/setup-node@v3.7.0 + - uses: actions/setup-node@v3.8.0 with: node-version: '18.x' registry-url: 'https://npm.pkg.github.com' From 39397a5a82ff6ab77ce39e7af9113c7c9cbd4c1e Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Wed, 16 Aug 2023 14:39:40 +0300 Subject: [PATCH 20/27] Revert "[Substrate companion] update libp2p to 0.52.0 (#7472)" (#7583) * Revert "[Substrate companion] update libp2p to 0.52.0 (#7472)" This reverts commit 01fd49a7fafa01f133e2dec538a2ef7c697a26aa. * Update dependencies * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 3873 +++++++++++------ node/core/approval-voting/Cargo.toml | 2 +- node/core/runtime-api/Cargo.toml | 2 +- node/gum/proc-macro/Cargo.toml | 2 +- .../network/bridge/src/validator_discovery.rs | 4 +- node/network/gossip-support/src/tests.rs | 6 +- node/overseer/Cargo.toml | 2 +- node/service/Cargo.toml | 2 +- node/subsystem-types/Cargo.toml | 2 +- runtime/kusama/Cargo.toml | 2 +- runtime/kusama/constants/Cargo.toml | 2 +- runtime/polkadot/Cargo.toml | 2 +- runtime/polkadot/constants/Cargo.toml | 2 +- runtime/rococo/Cargo.toml | 2 +- runtime/rococo/constants/Cargo.toml | 2 +- runtime/test-runtime/Cargo.toml | 2 +- runtime/test-runtime/constants/Cargo.toml | 2 +- runtime/westend/Cargo.toml | 2 +- runtime/westend/constants/Cargo.toml | 2 +- xcm/procedural/Cargo.toml | 2 +- 20 files changed, 2448 insertions(+), 1469 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d37230ea10f..50e6128a7f0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,6 +21,15 @@ dependencies = [ "gimli", ] +[[package]] +name = "addr2line" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -33,13 +42,23 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +[[package]] +name = "aead" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +dependencies = [ + "generic-array 0.14.7", +] + [[package]] name = "aead" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", + "rand_core 0.6.4", ] [[package]] @@ -49,7 +68,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array 0.14.6", + "generic-array 0.14.7", +] + +[[package]] +name = "aes" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" +dependencies = [ + "aes-soft", + "aesni", + "cipher 0.2.5", ] [[package]] @@ -103,34 +133,54 @@ dependencies = [ "subtle", ] +[[package]] +name = "aes-soft" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" +dependencies = [ + "cipher 0.2.5", + "opaque-debug 0.3.0", +] + +[[package]] +name = "aesni" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" +dependencies = [ + "cipher 0.2.5", + "opaque-debug 0.3.0", +] + [[package]] name = "ahash" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "once_cell", "version_check", ] [[package]] name = "ahash" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", - "getrandom 0.2.8", + "getrandom 0.2.10", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.18" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" dependencies = [ "memchr", ] @@ -143,9 +193,24 @@ checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "always-assert" -version = "0.1.2" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4436e0292ab1bb631b42973c61205e704475fe8126af845c8d923c0996328127" + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf688625d06217d5b1bb0ea9d9c44a1635fd0ee3534466388d18203174f4d11" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] [[package]] name = "anes" @@ -179,15 +244,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anstyle-parse" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" dependencies = [ "utf8parse", ] @@ -213,15 +278,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = 
"3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" [[package]] name = "approx" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "072df7202e63b127ab55acfe16ce97013d5b97bf160489336d3f1840fd78e99e" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" dependencies = [ "num-traits", ] @@ -242,9 +307,15 @@ dependencies = [ [[package]] name = "arbitrary" -version = "1.2.0" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" + +[[package]] +name = "arc-swap" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "ark-bls12-381" @@ -412,9 +483,9 @@ checksum = "d9b1c5a481ec30a5abd8dfbd94ab5cf1bb4e9a66be7f1b3b322f2f1170c200fd" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -428,15 +499,83 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "asn1-rs" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" +dependencies = [ + "asn1-rs-derive 0.1.0", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time 0.3.25", +] + +[[package]] +name = "asn1-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +dependencies = [ + "asn1-rs-derive 0.4.0", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time 0.3.25", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "assert_cmd" -version = "2.0.4" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ae1ddd39efd67689deb1979d80bad3bf7f2b09c6e6117c8d1f2443b5e2f83e" +checksum = "88903cb14723e4d4003335bb7f8a14f27691649105346a0f0957466c096adfe6" dependencies = [ + "anstyle", "bstr", "doc-comment", - "predicates", + "predicates 3.0.3", "predicates-core", "predicates-tree", "wait-timeout", @@ 
-450,39 +589,40 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-channel" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ - "concurrent-queue 2.1.0", + "concurrent-queue", "event-listener", "futures-core", ] [[package]] name = "async-io" -version = "1.6.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "concurrent-queue 1.2.2", + "async-lock", + "autocfg", + "cfg-if", + "concurrent-queue", "futures-lite", - "libc", "log", - "once_cell", "parking", "polling", + "rustix 0.37.23", "slab", "socket2 0.4.9", "waker-fn", - "winapi", ] [[package]] name = "async-lock" -version = "2.4.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" dependencies = [ "event-listener", ] @@ -495,33 +635,39 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = "cc6dde6e4ed435a4c1ee4e73592f5ba9da2151af10076cc04858746af9352d09" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "asynchronous-codec" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" +checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" dependencies = [ "bytes", "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] +[[package]] +name = "atomic-waker" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" + [[package]] name = "atty" version = "0.2.14" @@ -541,16 +687,16 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ - "addr2line", + "addr2line 0.20.0", "cc", "cfg-if", "libc", "miniz_oxide", - "object", + "object 0.31.1", "rustc-demangle", ] @@ -577,9 +723,15 @@ dependencies = [ [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + +[[package]] +name = "base16ct" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" [[package]] name = "base16ct" @@ -589,27 +741,36 @@ checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64ct" -version = "1.5.2" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "basic-toml" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" +checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6" +dependencies = [ + "serde", +] [[package]] name = "beef" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" dependencies = [ "serde", ] @@ -617,7 +778,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "hash-db", "log", @@ -638,19 +799,19 @@ version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", "lazycell", "peeking_take_while", - "prettyplease", + "prettyplease 0.2.12", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -659,6 +820,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" + [[package]] name = "bitvec" version = "1.0.1" @@ -688,31 +855,31 @@ checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" dependencies = [ "arrayref", "arrayvec 0.7.4", - "constant_time_eq 0.2.4", + "constant_time_eq 0.2.6", ] [[package]] name = "blake2s_simd" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4" +checksum = "6637f448b9e61dfadbdcbae9a885fadee1f3eaffb1f8d3c1965d3ade8bdfd44f" dependencies = [ "arrayref", "arrayvec 0.7.4", - "constant_time_eq 0.1.5", + "constant_time_eq 0.2.6", ] [[package]] name = 
"blake3" -version = "1.3.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f" +checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" dependencies = [ "arrayref", "arrayvec 0.7.4", "cc", "cfg-if", - "constant_time_eq 0.1.5", + "constant_time_eq 0.3.0", "digest 0.10.7", ] @@ -722,7 +889,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding", + "block-padding 0.1.5", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -734,16 +901,26 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", +] + +[[package]] +name = "block-modes" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" +dependencies = [ + "block-padding 0.2.1", + "cipher 0.2.5", ] [[package]] @@ -755,6 +932,12 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "bounded-collections" version = "0.1.8" @@ -782,24 +965,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bs58" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" -dependencies = [ - "tinyvec", -] - [[package]] name = "bstr" -version = "0.2.17" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ - "lazy_static", "memchr", - "regex-automata", + "regex-automata 0.3.4", + "serde", ] [[package]] @@ -813,9 +987,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" @@ -831,9 +1005,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c041d3eab048880cb0b86b256447da3f18859a163c3b8d8893f4e6368abe6393" +checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" [[package]] name = "byteorder" @@ -858,26 +1032,20 @@ dependencies 
= [ "pkg-config", ] -[[package]] -name = "cache-padded" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" - [[package]] name = "camino" -version = "1.1.2" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77df041dc383319cc661b428b6961a005db4d6808d5e12536931b1ca9556055" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" dependencies = [ "serde", ] @@ -890,7 +1058,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.16", + "semver 1.0.18", "serde", "serde_json", "thiserror", @@ -904,11 +1072,23 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "6c6b2562119bf28c3439f7f02db99faf0aa1a8cdfe5772a2ee155d32227239f0" dependencies = [ "jobserver", + "libc", +] + +[[package]] +name = "ccm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" +dependencies = [ + "aead 0.3.2", + "cipher 0.2.5", + "subtle", ] [[package]] @@ -922,9 +1102,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.1" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8790cf1286da485c72cf5fc7aeba308438800036ec67d89425924c4807268c9" +checksum = "b40ccee03b5175c18cde8f37e7d2a33bcef6f8ec8f7cc0d81090d1bb380949c9" dependencies = [ "smallvec", ] @@ -968,22 +1148,24 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ - "libc", - "num-integer", + "android-tzdata", + "iana-time-zone", + "js-sys", "num-traits", - "time", + "time 0.1.45", + "wasm-bindgen", "winapi", ] [[package]] name = "ciborium" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" dependencies = [ "ciborium-io", "ciborium-ll", @@ -992,15 +1174,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" [[package]] name = "ciborium-ll" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +checksum = 
"defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" dependencies = [ "ciborium-io", "half", @@ -1014,18 +1196,27 @@ checksum = "b9b68e3193982cd54187d71afdb2a271ad4cf8af157858e9cb911b91321de143" dependencies = [ "core2", "multibase", - "multihash 0.17.0", + "multihash", "serde", "unsigned-varint", ] +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array 0.14.7", +] + [[package]] name = "cipher" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -1049,9 +1240,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1060,15 +1251,15 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.23" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", - "bitflags", - "clap_derive 3.2.18", + "bitflags 1.3.2", + "clap_derive 3.2.25", "clap_lex 0.2.4", - "indexmap", + "indexmap 1.9.3", "once_cell", "strsim", "termcolor", @@ -1077,33 +1268,32 @@ dependencies = [ [[package]] name = "clap" -version = "4.2.5" +version = "4.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a1f23fa97e1d1641371b51f35535cb26959b8e27ab50d167a8b996b5bada819" +checksum = "5fd304a20bff958a57f04c4e96a2e7594cc4490a0e809cbd48bb6437edaa452d" dependencies = [ "clap_builder", - "clap_derive 4.2.0", + "clap_derive 4.3.12", "once_cell", ] [[package]] name = "clap_builder" -version = "4.2.5" +version = "4.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdc5d93c358224b4d6867ef1356d740de2303e9892edc06c5340daeccd96bab" +checksum = "01c6a3f08f1fe5662a35cfe393aec09c4df95f60ee93b7556505260f75eee9e1" dependencies = [ "anstream", "anstyle", - "bitflags", - "clap_lex 0.4.1", + "clap_lex 0.5.0", "strsim", ] [[package]] name = "clap_derive" -version = "3.2.18" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck", "proc-macro-error", @@ -1114,14 +1304,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.2.0" +version = "4.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" +checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -1135,15 +1325,15 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" [[package]] name = "coarsetime" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "454038500439e141804c655b4cd1bc6a70bcb95cd2bc9463af5661b6956f0e46" +checksum = "a90d114103adbc625300f346d4d09dfb4ab1c4a8df6868435dd903392ecf4354" dependencies = [ "libc", "once_cell", @@ -1163,9 +1353,9 @@ dependencies = [ [[package]] name = "color-eyre" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" +checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" dependencies = [ "backtrace", "eyre", @@ -1182,9 +1372,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "7.0.0" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e1f7e5d046697d34b593bdba8ee31f4649366e452a2ccabb3baf3511e503d1" +checksum = "9ab77dbd8adecaf3f0db40581631b995f312a8a5ae3aa9993188bb8f23d83a5b" dependencies = [ "strum", "strum_macros", @@ -1213,40 +1403,31 @@ checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" [[package]] name = "concurrent-queue" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" -dependencies = [ - "cache-padded", -] - -[[package]] -name = "concurrent-queue" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] [[package]] name = "console" -version = "0.15.5" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d79fbe8970a77e3e34151cc13d3b3e248aa0faaecb9f6091fa07ebefe5ad60" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "const-oid" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" [[package]] name = "const-random" @@ -1264,7 +1445,7 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", "once_cell", "proc-macro-hack", "tiny-keccak", @@ -1272,15 +1453,15 @@ dependencies = [ [[package]] name = "constant_time_eq" -version = "0.1.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" [[package]] name = "constant_time_eq" -version = "0.2.4" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f3ad85c1f65dc7b37604eb0e89748faf0b9653065f2a8ef69f96a687ec1e9279" +checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" [[package]] name = "convert_case" @@ -1290,9 +1471,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -1300,9 +1481,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core2" @@ -1315,9 +1496,18 @@ dependencies = [ [[package]] name = "cpp_demangle" -version = "0.3.4" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "cpp_demangle" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "931ab2a3e6330a07900b8e7ca4e106cdcbb93f2b9a52df55e54ee53d8305b55d" +checksum = "ee34052ee3d93d6d8f3e6f81d85c47921f6653a19a7b70e939e3e602d893a674" dependencies = [ "cfg-if", ] @@ -1334,9 +1524,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -1440,7 +1630,22 @@ dependencies = [ ] [[package]] -name = "crc32fast" +name = "crc" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" + +[[package]] +name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" @@ -1458,7 +1663,7 @@ dependencies = [ "atty", "cast", "ciborium", - "clap 3.2.23", + "clap 3.2.25", "criterion-plot", "itertools", "lazy_static", @@ -1484,9 +1689,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1494,9 +1699,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", 
@@ -1505,22 +1710,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ + "autocfg", "cfg-if", "crossbeam-utils", - "lazy_static", - "memoffset 0.6.4", + "memoffset 0.9.0", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1528,9 +1733,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -1541,13 +1746,25 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-bigint" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -1559,7 +1776,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "rand_core 0.6.4", "typenum", ] @@ -1570,7 +1787,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -1580,20 +1797,10 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] -[[package]] -name = "ctor" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "ctr" version = "0.8.0" @@ -1654,9 +1861,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.80" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +checksum = "f68e12e817cb19eaab81aaec582b4052d07debd3c3c6b083b9d361db47c7dc9d" dependencies = [ "cc", "cxxbridge-flags", @@ -1666,9 +1873,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.80" +version = "1.0.102" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +checksum = "e789217e4ab7cf8cc9ce82253180a9fe331f35f5d339f0ccfe0270b39433f397" dependencies = [ "cc", "codespan-reporting", @@ -1676,37 +1883,72 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] name = "cxxbridge-flags" -version = "1.0.80" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" +checksum = "78a19f4c80fd9ab6c882286fa865e92e07688f4387370a209508014ead8751d0" [[package]] name = "cxxbridge-macro" -version = "1.0.80" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fcfa71f66c8563c4fa9dd2bb68368d50267856f831ac5d85367e0805f9606c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] + +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ + "fnv", + "ident_case", "proc-macro2", "quote", + "strsim", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core", + "quote", "syn 1.0.109", ] [[package]] name = "dashmap" -version = "5.4.0" +version = "5.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" dependencies = [ "cfg-if", - "hashbrown 0.12.3", + "hashbrown 0.14.0", "lock_api", "once_cell", - "parking_lot_core 0.9.6", + "parking_lot_core 0.9.8", ] [[package]] @@ -1717,9 +1959,9 @@ checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1727,9 +1969,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1744,6 +1986,17 @@ dependencies = [ "uuid", ] +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + [[package]] name = "der" version = "0.7.7" @@ 
-1754,6 +2007,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der-parser" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" +dependencies = [ + "asn1-rs 0.3.1", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der-parser" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +dependencies = [ + "asn1-rs 0.5.2", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" + [[package]] name = "derivative" version = "2.2.0" @@ -1776,6 +2063,37 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_builder" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_macro" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" +dependencies = [ + "derive_builder_core", + "syn 1.0.109", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -1791,9 +2109,9 @@ dependencies = [ [[package]] name = "diff" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "difflib" @@ -1816,7 +2134,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -1825,7 +2143,7 @@ version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "const-oid", "crypto-common", "subtle", @@ -1852,9 +2170,9 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", @@ -1872,11 +2190,22 @@ dependencies = [ "winapi", ] +[[package]] +name = "displaydoc" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] + [[package]] name = "dissimilar" -version = "1.0.3" +version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ad93652f40969dead8d4bf897a41e9462095152eb21c56e5830537e41179dd" +checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "dleq_vrf" @@ -1930,9 +2259,9 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.20", + "syn 2.0.28", "termcolor", - "toml 0.7.3", + "toml 0.7.6", "walkdir", ] @@ -1944,9 +2273,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "dtoa" -version = "1.0.2" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5caaa75cbd2b960ff1e5392d2cfb1f44717fffe12fc1f32b7b5d1267f99732a6" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dyn-clonable" @@ -1971,22 +2300,34 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.4" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" + +[[package]] +name = "ecdsa" +version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] [[package]] name = "ecdsa" -version = "0.16.7" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0997c976637b606099b9985693efa3581e84e41f5c11ba5255f88711058ad428" +checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der", + "der 0.7.7", "digest 0.10.7", - "elliptic-curve", - "rfc6979", + "elliptic-curve 0.13.5", + "rfc6979 0.4.0", "signature 2.1.0", - "spki", + "spki 0.7.2", ] [[package]] @@ -2008,7 +2349,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] @@ -2022,7 +2363,7 @@ dependencies = [ "hashbrown 0.12.3", "hex", "rand_core 0.6.4", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] @@ -2032,21 +2373,43 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", + "generic-array 0.14.7", + "group 0.12.1", + "hkdf", + "pem-rfc7468", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1 0.3.0", + "subtle", + "zeroize", +] + [[package]] name = "elliptic-curve" version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.2.0", + "crypto-bigint 0.5.2", "digest 0.10.7", - "ff", - "generic-array 0.14.6", - "group", - "pkcs8", + "ff 0.13.0", + "generic-array 0.14.7", + "group 0.13.0", + "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -2059,9 +2422,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] @@ -2095,28 +2458,28 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "enumn" -version = "0.1.8" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48016319042fb7c87b78d2993084a831793a897a5cd1a2a67cab9d1eeb4b7d76" +checksum = "b893c4eb2dc092c811165f84dc7447fae16fb66521717968c34c509b39b1a5c5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "env_logger" -version = "0.7.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", - "humantime 1.3.0", + "humantime", "log", "regex", "termcolor", @@ -2124,12 +2487,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" dependencies = [ - "atty", - "humantime 2.1.0", + "humantime", + "is-terminal", "log", "regex", "termcolor", @@ -2141,11 +2504,17 @@ version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48c92028aaa870e83d51c64e5d4e0b6981b360c522198c23959f219a4e1b15b" +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "erased-serde" -version = "0.3.20" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad132dd8d0d0b546348d7d86cb3191aad14b34e5f979781fc005c80d4ac67ffd" +checksum = "da96524cc884f6558f1769b6c46686af2fe8e8b4cd253bd5a3cdba8181b8e070" dependencies = [ "serde", ] @@ -2162,20 +2531,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi", -] - -[[package]] -name = "errno" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", @@ -2194,9 +2552,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "2.5.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "exit-future" @@ -2247,14 +2605,14 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "eyre" -version = "0.6.5" +version = "0.6.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "221239d1d5ea86bf5d6f91c9d6bc3646ffe471b08ff9b0f91c44f115ac969d2b" +checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" dependencies = [ "indenter", "once_cell", @@ -2274,13 +2632,19 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "fatality" version = "0.0.6" @@ -2298,7 +2662,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5aa1e3ae159e592ad222dc90c5acbad632b527779ba88486abe92782ab268bd" dependencies = [ "expander 0.0.4", - "indexmap", + "indexmap 1.9.3", "proc-macro-crate", "proc-macro2", "quote", @@ -2331,6 +2695,16 @@ dependencies = [ "web-sys", ] +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.0" @@ -2362,24 +2736,24 @@ checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "file-per-thread-logger" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" +checksum = "84f2e425d9790201ba4af4630191feac6dcc98765b118d4d18e91d23c2353866" dependencies = [ - "env_logger 0.7.1", + "env_logger 0.10.0", "log", ] [[package]] name = "filetime" -version = "0.2.17" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" +checksum = "5cbc844cecaee9d4443931972e1289c8ff485cb4cc2767cb03ca139ed6885153" dependencies = [ "cfg-if", "libc", - "redox_syscall", - "windows-sys 0.36.1", + "redox_syscall 0.2.16", + "windows-sys 0.48.0", ] [[package]] @@ -2424,9 +2798,20 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.0" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "flate2" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +dependencies = [ + "crc32fast", + "libz-sys", + "miniz_oxide", +] [[package]] name = "float-cmp" @@ -2446,16 +2831,16 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", ] [[package]] name = "form_urlencoded" -version = 
"1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -2469,7 +2854,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-support-procedural", @@ -2494,12 +2879,12 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 4.2.5", + "clap 4.3.19", "comfy-table", "frame-benchmarking", "frame-support", @@ -2542,18 +2927,18 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2570,7 +2955,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -2599,7 +2984,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-recursion", "futures", @@ -2620,10 +3005,11 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "aquamarine", - "bitflags", + "bitflags 1.3.2", + "docify", "environmental", "frame-metadata", "frame-support-procedural", @@ -2657,7 +3043,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" 
dependencies = [ "Inflector", "cfg-expr", @@ -2669,35 +3055,35 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-executive", @@ -2724,7 +3110,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -2737,7 +3123,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "cfg-if", "frame-support", @@ -2756,7 +3142,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -2771,7 +3157,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "sp-api", @@ -2780,7 +3166,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "parity-scale-codec", @@ -2791,9 +3177,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "2.6.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ebd3504ad6116843b8375ad70df74e7bfe83cac77a1f3fe73200c844d43bfe0" +checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" [[package]] name = "fs2" @@ 
-2807,21 +3193,14 @@ dependencies = [ [[package]] name = "fs4" -version = "0.6.3" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea55201cc351fdb478217c0fb641b59813da9b4efe4c414a9d8f989a657d149" +checksum = "2eeb4ed9e12f43b7fa0baae3f9cdda28352770132ef2e09a23760c29cae8bd47" dependencies = [ - "libc", - "rustix 0.35.13", - "winapi", + "rustix 0.38.6", + "windows-sys 0.48.0", ] -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "funty" version = "2.0.0" @@ -2879,16 +3258,16 @@ checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "waker-fn", ] @@ -2900,7 +3279,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -2910,8 +3289,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.7", - "webpki", + "rustls 0.20.8", + "webpki 0.22.0", ] [[package]] @@ -2945,7 +3324,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "pin-utils", "slab", ] @@ -2962,7 +3341,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "chrono", "frame-election-provider-support", @@ -2984,9 +3363,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -3016,9 +3395,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -3047,26 +3426,26 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.0" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" dependencies = [ "fallible-iterator", - "indexmap", + "indexmap 1.9.3", "stable_deref_trait", ] [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.8" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" +checksum = "aca8bbd8e0707c1887a8bbb7e6b40e228f251ff5d62c8220a4a7a53c73aff006" dependencies = [ "aho-corasick", "bstr", @@ -3075,22 +3454,33 @@ dependencies = [ "regex", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", + "ff 0.13.0", "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.17" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66b91535aa35fea1523ad1b86cb6b53c28e0ae566ba4a460f4457e936cad7c6f" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -3098,7 +3488,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util", @@ -3113,16 +3503,16 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.2.2" +version = "4.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d6a30320f094710245150395bc763ad23128d6a1ebbad7594dc4164b62c56b" +checksum = "83c3372087601b532857d332f5957cbae686da52bb7810bf038c3e3c3cc2fa0d" dependencies = [ "log", "pest", "pest_derive", - "quick-error 2.0.1", "serde", "serde_json", + "thiserror", ] [[package]] @@ -3155,7 +3545,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", ] [[package]] @@ -3164,15 +3554,15 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", "allocator-api2", ] [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -3185,9 +3575,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -3252,7 +3642,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array 0.14.6", + "generic-array 0.14.7", "hmac 0.8.1", ] @@ -3281,9 +3671,9 @@ dependencies = [ [[package]] name = 
"http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -3298,20 +3688,20 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", ] [[package]] name = "http-range-header" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" [[package]] name = "httparse" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -3319,15 +3709,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error 1.2.3", -] - [[package]] name = "humantime" version = "2.1.0" @@ -3336,9 +3717,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -3350,7 +3731,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "socket2 0.4.9", "tokio", "tower-service", @@ -3360,51 +3741,81 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", "log", - "rustls 0.20.7", + "rustls 0.20.8", "rustls-native-certs", "tokio", - "tokio-rustls 0.23.2", - "webpki-roots 0.22.2", + "tokio-rustls 0.23.4", + "webpki-roots", ] [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", "log", - "rustls 0.21.2", + "rustls 0.21.6", "rustls-native-certs", "tokio", "tokio-rustls 0.24.1", ] [[package]] -name = "idna" -version = "0.2.3" +name = "iana-time-zone" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ - "matches", - 
"unicode-bidi", - "unicode-normalization", -] + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows 0.48.0", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.3.0" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3436,7 +3847,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows", + "windows 0.34.0", ] [[package]] @@ -3495,22 +3906,33 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", "serde", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + [[package]] name = "indicatif" -version = "0.17.3" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729" +checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" dependencies = [ "console", + "instant", "number_prefix", "portable-atomic", "unicode-width", @@ -3522,7 +3944,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", ] [[package]] @@ -3536,9 +3958,9 @@ dependencies = [ [[package]] name = "integer-encoding" -version = "3.0.2" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90c11140ffea82edce8dcd74137ce9324ec24b3cf0175fc9d7e29164da9915b8" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "integer-sqrt" @@ -3550,18 +3972,31 @@ dependencies = [ ] [[package]] -name = "io-lifetimes" -version = "0.7.5" +name = "interceptor" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074" +checksum = "1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" +dependencies = [ + "async-trait", + "bytes", + "log", + "rand 0.8.5", + "rtcp", + 
"rtp", + "thiserror", + "tokio", + "waitgroup", + "webrtc-srtp", + "webrtc-util", +] [[package]] name = "io-lifetimes" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.2", "libc", "windows-sys 0.48.0", ] @@ -3574,31 +4009,30 @@ checksum = "aa2f047c0a98b2f299aa5d6d7088443570faae494e9ae1305e48be000c9e0eb1" [[package]] name = "ipconfig" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.4.9", + "socket2 0.5.3", "widestring", - "winapi", - "winreg 0.7.0", + "windows-sys 0.48.0", + "winreg 0.50.0", ] [[package]] name = "ipnet" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "is-terminal" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.1", - "io-lifetimes 1.0.10", - "rustix 0.37.18", + "hermit-abi 0.3.2", + "rustix 0.38.6", "windows-sys 0.48.0", ] @@ -3613,18 +4047,18 @@ dependencies = [ [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" @@ -3680,10 +4114,10 @@ dependencies = [ "soketto", "thiserror", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls 0.23.4", "tokio-util", "tracing", - "webpki-roots 0.22.2", + "webpki-roots", ] [[package]] @@ -3722,7 +4156,7 @@ checksum = "cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" dependencies = [ "async-trait", "hyper", - "hyper-rustls 0.23.0", + "hyper-rustls 0.23.2", "jsonrpsee-core", "jsonrpsee-types", "rustc-hash", @@ -3801,17 +4235,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.8", + "elliptic-curve 0.13.5", "once_cell", "sha2 0.10.7", ] [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] [[package]] name = "kusama-runtime" @@ -4006,9 +4443,9 
@@ checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libflate" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" +checksum = "5ff4ae71b685bbad2f2f391fe74f6b7659a34871c08b210fdc039e43bee07d18" dependencies = [ "adler32", "crc32fast", @@ -4026,9 +4463,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", @@ -4042,14 +4479,14 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libp2p" -version = "0.52.1" +version = "0.51.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38039ba2df4f3255842050845daef4a004cc1f26da03dbc645535088b51910ef" +checksum = "f210d259724eae82005b5c48078619b7745edb7b76de370b03f8ba59ea103097" dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.8", + "getrandom 0.2.10", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -4062,10 +4499,12 @@ dependencies = [ "libp2p-metrics", "libp2p-noise", "libp2p-ping", + "libp2p-quic", "libp2p-request-response", "libp2p-swarm", "libp2p-tcp", "libp2p-wasm-ext", + "libp2p-webrtc", "libp2p-websocket", "libp2p-yamux", "multiaddr", @@ -4074,9 +4513,9 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" -version = "0.2.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" +checksum = "510daa05efbc25184458db837f6f9a5143888f1caa742426d92e1833ddd38a50" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4086,9 +4525,9 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.2.0" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d45dd90e8f0e1fa59e85ff5316dd4d1ac41a9a507e79cda1b0e9b7be43ad1a56" +checksum = "4caa33f1d26ed664c4fe2cca81a08c8e07d4c1c04f2f4ac7655c2dd85467fda0" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4098,9 +4537,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" +version = "0.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef7dd7b09e71aac9271c60031d0e558966cdb3253ba0308ab369bb2de80630d0" +checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" dependencies = [ "either", "fnv", @@ -4110,7 +4549,7 @@ dependencies = [ "libp2p-identity", "log", "multiaddr", - "multihash 0.19.0", + "multihash", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -4126,13 +4565,12 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.40.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd4394c81c0c06d7b4a60f3face7e8e8a9b246840f98d2c80508d0721b032147" +checksum = "146ff7034daae62077c415c2376b8057368042df6ab95f5432ad5e88568b1554" dependencies = [ "futures", "libp2p-core", - "libp2p-identity", "log", "parking_lot 0.12.1", "smallvec", @@ -4141,9 +4579,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.43.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "6a29675a32dbcc87790db6cf599709e64308f1ae9d5ecea2d259155889982db8" +checksum = "5455f472243e63b9c497ff320ded0314254a9eb751799a39c283c6f20b793f3c" dependencies = [ "asynchronous-codec", "either", @@ -4153,7 +4591,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "lru 0.10.0", + "lru 0.10.1", "quick-protobuf", "quick-protobuf-codec", "smallvec", @@ -4163,14 +4601,15 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2874d9c6575f1d7a151022af5c42bb0ffdcdfbafe0a6fd039de870b384835a2" +checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" dependencies = [ - "bs58 0.5.0", + "bs58", "ed25519-dalek", "log", - "multihash 0.19.0", + "multiaddr", + "multihash", "quick-protobuf", "rand 0.8.5", "sha2 0.10.7", @@ -4180,9 +4619,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.44.3" +version = "0.43.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f2584b0c27f879a1cca4b753fd96874109e5a2f46bd6e30924096456c2ba9b2" +checksum = "39d5ef876a2b2323d63c258e63c2f8e36f205fe5a11f0b3095d59635650790ff" dependencies = [ "arrayvec 0.7.4", "asynchronous-codec", @@ -4208,9 +4647,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.44.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a2567c305232f5ef54185e9604579a894fd0674819402bb0ac0246da82f52a" +checksum = "19983e1f949f979a928f2c603de1cf180cc0dc23e4ac93a62651ccb18341460b" dependencies = [ "data-encoding", "futures", @@ -4221,7 +4660,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2 0.5.3", + "socket2 0.4.9", "tokio", "trust-dns-proto", "void", @@ -4229,26 +4668,23 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.13.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3787ea81798dcc5bf1d8b40a8e8245cf894b168d04dd70aa48cb3ff2fff141d2" +checksum = "a42ec91e227d7d0dafa4ce88b333cdf5f277253873ab087555c92798db2ddd46" dependencies = [ - "instant", "libp2p-core", "libp2p-identify", - "libp2p-identity", "libp2p-kad", "libp2p-ping", "libp2p-swarm", - "once_cell", "prometheus-client", ] [[package]] name = "libp2p-noise" -version = "0.43.0" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87945db2b3f977af09b62b9aa0a5f3e4870995a577ecd845cdeba94cdf6bbca7" +checksum = "9c3673da89d29936bc6435bafc638e2f184180d554ce844db65915113f86ec5e" dependencies = [ "bytes", "curve25519-dalek 3.2.0", @@ -4256,8 +4692,6 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "multiaddr", - "multihash 0.19.0", "once_cell", "quick-protobuf", "rand 0.8.5", @@ -4271,27 +4705,48 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.43.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd5ee3270229443a2b34b27ed0cb7470ef6b4a6e45e54e89a8771fa683bab48" +checksum = "3e57759c19c28a73ef1eb3585ca410cefb72c1a709fcf6de1612a378e4219202" dependencies = [ "either", "futures", "futures-timer", "instant", "libp2p-core", - "libp2p-identity", "libp2p-swarm", "log", "rand 0.8.5", "void", ] +[[package]] +name = "libp2p-quic" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6b26abd81cd2398382a1edfe739b539775be8a90fa6914f39b2ab49571ec735" +dependencies = [ + "bytes", + "futures", + 
"futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "log", + "parking_lot 0.12.1", + "quinn-proto", + "rand 0.8.5", + "rustls 0.20.8", + "thiserror", + "tokio", +] + [[package]] name = "libp2p-request-response" -version = "0.25.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20bd837798cdcce4283d2675f08bcd3756a650d56eab4d4367e1b3f27eed6887" +checksum = "7ffdb374267d42dc5ed5bc53f6e601d4a64ac5964779c6e40bb9e4f14c1e30d5" dependencies = [ "async-trait", "futures", @@ -4299,17 +4754,15 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "rand 0.8.5", "smallvec", - "void", ] [[package]] name = "libp2p-swarm" -version = "0.43.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de15b2097fc3bde063df8c202803538ff467fedb18f01c13bc5da55913d246c" +checksum = "903b3d592d7694e56204d211f29d31bc004be99386644ba8731fc3e3ef27b296" dependencies = [ "either", "fnv", @@ -4320,8 +4773,6 @@ dependencies = [ "libp2p-identity", "libp2p-swarm-derive", "log", - "multistream-select", - "once_cell", "rand 0.8.5", "smallvec", "tokio", @@ -4330,73 +4781,119 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.33.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" +checksum = "0fba456131824ab6acd4c7bf61e9c0f0a3014b5fc9868ccb8e10d344594cdc4f" dependencies = [ "heck", - "proc-macro-warning", - "proc-macro2", "quote", - "syn 2.0.20", + "syn 1.0.109", ] [[package]] name = "libp2p-tcp" -version = "0.40.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bfdfb6f945c5c014b87872a0bdb6e0aef90e92f380ef57cd9013f118f9289d" +checksum = "33d33698596d7722d85d3ab0c86c2c322254fce1241e91208e3679b4eb3026cf" dependencies = [ "futures", "futures-timer", "if-watch", "libc", "libp2p-core", - "libp2p-identity", "log", - "socket2 0.5.3", + "socket2 0.4.9", "tokio", ] +[[package]] +name = "libp2p-tls" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + "libp2p-identity", + "rcgen 0.10.0", + "ring 0.16.20", + "rustls 0.20.8", + "thiserror", + "webpki 0.22.0", + "x509-parser 0.14.0", + "yasna", +] + [[package]] name = "libp2p-wasm-ext" -version = "0.40.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e5d8e3a9e07da0ef5b55a9f26c009c8fb3c725d492d8bb4b431715786eea79c" +checksum = "77dff9d32353a5887adb86c8afc1de1a94d9e8c3bc6df8b2201d7cdf5c848f43" dependencies = [ "futures", "js-sys", "libp2p-core", - "send_wrapper", + "parity-send-wrapper", "wasm-bindgen", "wasm-bindgen-futures", ] +[[package]] +name = "libp2p-webrtc" +version = "0.4.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dba48592edbc2f60b4bc7c10d65445b0c3964c07df26fdf493b6880d33be36f8" +dependencies = [ + "async-trait", + "asynchronous-codec", + "bytes", + "futures", + "futures-timer", + "hex", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "log", + "multihash", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "rcgen 0.9.3", + "serde", + "stun", + "thiserror", + "tinytemplate", + "tokio", + "tokio-util", + "webrtc", +] + [[package]] name = 
"libp2p-websocket" -version = "0.42.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956d981ebc84abc3377e5875483c06d94ff57bc6b25f725047f9fd52592f72d4" +checksum = "111273f7b3d3510524c752e8b7a5314b7f7a1fee7e68161c01a7d72cbb06db9f" dependencies = [ "either", "futures", "futures-rustls", "libp2p-core", - "libp2p-identity", "log", "parking_lot 0.12.1", "quicksink", "rw-stream-sink", "soketto", "url", - "webpki-roots 0.23.1", + "webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.44.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a9b42ab6de15c6f076d8fb11dc5f48d899a10b55a2e16b12be9012a05287b0" +checksum = "4dcd21d950662700a385d4c6d68e2f5f54d778e97068cdd718522222ef513bda" dependencies = [ "futures", "libp2p-core", @@ -4422,12 +4919,12 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", - "base64 0.13.0", + "base64 0.13.1", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", @@ -4435,7 +4932,7 @@ dependencies = [ "libsecp256k1-gen-genmult", "rand 0.8.5", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "typenum", ] @@ -4470,9 +4967,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.3" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "pkg-config", @@ -4481,18 +4978,18 @@ dependencies = [ [[package]] name = "link-cplusplus" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +checksum = "9d240c6f7e1ba3a28b0249f774e6a9dd0175054b52dfbb61b16eb8505c3785c9" dependencies = [ "cc", ] [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linked_hash_set" @@ -4505,36 +5002,36 @@ dependencies = [ [[package]] name = "linregress" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "475015a7f8f017edb28d2e69813be23500ad4b32cfe3421c4148efc97324ee52" +checksum = "4de0b5f52a9f84544d268f5fabb71b38962d6aa3c6600b8bcd27d44ccf9c9c45" dependencies = [ "nalgebra", ] [[package]] name = "linux-raw-sys" -version = "0.0.46" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.3.6" +version = "0.4.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b64f40e5e03e0d54f03845c8197d0291253cdbedfb1cb46b13c2c117554a9f4c" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -4552,9 +5049,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03f1160296536f10c833a82dca22267d5486734230d47bf00bf435885814ba1e" +checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" dependencies = [ "hashbrown 0.13.2", ] @@ -4615,7 +5112,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -4629,7 +5126,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -4640,7 +5137,7 @@ checksum = "c12469fc165526520dff2807c2975310ab47cf7190a45b99b49a7dc8befab17b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -4651,7 +5148,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -4672,7 +5169,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -4681,37 +5178,47 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matrixmultiply" -version = "0.3.2" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add85d4dd35074e6fedc608f8c8f513a3548619a9024b751949ef0e8e45a4d84" +checksum = "090126dc04f95dc0d1c1c91f61bdd474b3930ca064c1edc8a849da2c6cbe1e77" dependencies = [ + "autocfg", "rawpointer", ] +[[package]] +name = "md-5" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memfd" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" +checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" dependencies = [ - "rustix 
0.36.7", + "rustix 0.37.23", ] [[package]] @@ -4725,9 +5232,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] @@ -4750,6 +5257,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + [[package]] name = "memory-db" version = "0.32.0" @@ -4796,9 +5312,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minimal-lexical" @@ -4808,9 +5324,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] @@ -4829,7 +5345,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "log", @@ -4848,7 +5364,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "anyhow", "jsonrpsee", @@ -4863,24 +5379,24 @@ dependencies = [ [[package]] name = "mockall" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" +checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ "cfg-if", "downcast", "fragile", "lazy_static", "mockall_derive", - "predicates", + "predicates 2.1.5", "predicates-tree", ] [[package]] name = "mockall_derive" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" +checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", "proc-macro2", @@ -4890,16 +5406,16 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.18.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92a651988b3ed3ad1bc8c87d016bb92f6f395b84ed1db9b926b32b1fc5a2c8b5" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" dependencies = [ "arrayref", "byteorder", "data-encoding", - "libp2p-identity", + "log", "multibase", - "multihash 0.19.0", + "multihash", "percent-encoding", "serde", 
"static_assertions", @@ -4929,41 +5445,12 @@ dependencies = [ "blake3", "core2", "digest 0.10.7", - "multihash-derive 0.8.0", + "multihash-derive", "sha2 0.10.7", "sha3", "unsigned-varint", ] -[[package]] -name = "multihash" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd59dcc2bbe70baabeac52cd22ae52c55eefe6c38ff11a9439f16a350a939f2" -dependencies = [ - "core2", - "unsigned-varint", -] - -[[package]] -name = "multihash-codetable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46d7ff0b9b8d818e709e12135bfb6582fcde982ba2be48ea52e6e1df098c7a4" -dependencies = [ - "blake2b_simd", - "blake2s_simd", - "blake3", - "core2", - "digest 0.10.7", - "multihash-derive 0.9.0", - "ripemd", - "sha-1 0.10.0", - "sha2 0.10.7", - "sha3", - "strobe-rs", -] - [[package]] name = "multihash-derive" version = "0.8.0" @@ -4978,31 +5465,6 @@ dependencies = [ "synstructure", ] -[[package]] -name = "multihash-derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "890e72cb7396cb99ed98c1246a97b243cc16394470d94e0bc8b0c2c11d84290e" -dependencies = [ - "core2", - "multihash 0.19.0", - "multihash-derive-impl", -] - -[[package]] -name = "multihash-derive-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d38685e08adb338659871ecfc6ee47ba9b22dcc8abcf6975d379cc49145c3040" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "multimap" version = "0.8.3" @@ -5011,9 +5473,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "multistream-select" -version = "0.13.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +checksum = "c8552ab875c1313b97b8d20cb857b9fd63e2d1d6a0a1b53ce9821e575405f27a" dependencies = [ "bytes", "futures", @@ -5025,9 +5487,9 @@ dependencies = [ [[package]] name = "nalgebra" -version = "0.32.1" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6515c882ebfddccaa73ead7320ca28036c4bc84c9bcca3cc0cbba8efe89223a" +checksum = "307ed9b18cc2423f29e83f84fd23a8e73628727990181f18641a8b5dc2ab1caa" dependencies = [ "approx", "matrixmultiply", @@ -5041,9 +5503,9 @@ dependencies = [ [[package]] name = "nalgebra-macros" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d232c68884c0c99810a5a4d333ef7e47689cfd0edc85efc9e54e1e6bf5212766" +checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ "proc-macro2", "quote", @@ -5056,13 +5518,22 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7d66043b25d4a6cccb23619d10c19c25304b355a7dccd4a8e11423dd2382146" dependencies = [ - "clap 3.2.23", "rand 0.8.5", ] [[package]] -name = "nanorand" -version = "0.7.0" +name = "names" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bddcd3bf5144b6392de80e04c347cd7fab2508f6df16a85fc496ecd5cec39bc" +dependencies = [ + "clap 3.2.25", + "rand 0.8.5", +] + +[[package]] +name = "nanorand" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" 
@@ -5085,7 +5556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" dependencies = [ "anyhow", - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "netlink-packet-core", @@ -5094,9 +5565,9 @@ dependencies = [ [[package]] name = "netlink-packet-utils" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25af9cf0dc55498b7bd94a1508af7a78706aa0ab715a73c5169273e03c84845e" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" dependencies = [ "anyhow", "byteorder", @@ -5121,9 +5592,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" +checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ "bytes", "futures", @@ -5134,13 +5605,14 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.1" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", + "memoffset 0.6.5", ] [[package]] @@ -5149,7 +5621,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "memoffset 0.7.1", @@ -5165,13 +5637,12 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "7.1.0" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", - "version_check", ] [[package]] @@ -5180,6 +5651,16 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -5193,18 +5674,18 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" +checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" dependencies = [ "num-traits", ] [[package]] name = "num-format" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b862ff8df690cf089058c98b183676a7ed0f974cc08b426800093227cbff3b" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" dependencies = [ "arrayvec 0.7.4", "itoa", @@ -5212,9 +5693,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -5234,20 +5715,20 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.1.19", + "hermit-abi 0.3.2", "libc", ] @@ -5259,16 +5740,43 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.30.3" +version = "0.30.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" dependencies = [ "crc32fast", "hashbrown 0.13.2", - "indexmap", + "indexmap 1.9.3", + "memchr", +] + +[[package]] +name = "object" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" +dependencies = [ + "asn1-rs 0.3.1", +] + +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs 0.5.2", +] + [[package]] name = "once_cell" version = "1.18.0" @@ -5295,9 +5803,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "orchestra" @@ -5342,24 +5850,43 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.0.0" +version = "6.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac" [[package]] -name = "output_vt100" -version = "0.1.2" +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + +[[package]] +name = "p256" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ - "winapi", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2 0.10.7", ] [[package]] -name = "owo-colors" -version = "3.2.0" +name = "p384" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20448fd678ec04e6ea15bbe0476874af65e98a01515d667aa49f1434dc44ebf4" +checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +dependencies = [ + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2 0.10.7", +] [[package]] name = "packed_simd_2" @@ -5374,7 +5901,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5389,7 +5916,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -5405,7 +5932,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -5419,7 +5946,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5443,7 +5970,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "aquamarine", "docify", @@ -5465,7 +5992,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -5484,7 +6011,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5499,7 +6026,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -5518,7 +6045,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -5542,7 +6069,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5560,7 +6087,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5579,7 +6106,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5596,7 +6123,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -5613,7 +6140,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5631,7 +6158,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5654,7 +6181,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5667,7 +6194,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5686,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "docify", "frame-benchmarking", @@ -5705,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5728,7 +6255,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "enumflags2", "frame-benchmarking", @@ -5744,7 +6271,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5764,7 +6291,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5781,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5798,7 +6325,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5817,7 +6344,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5834,7 +6361,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5850,7 +6377,7 
@@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5866,7 +6393,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -5885,7 +6412,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5905,7 +6432,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -5916,7 +6443,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -5933,7 +6460,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5957,7 +6484,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5974,7 +6501,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -5989,7 +6516,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6007,7 +6534,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6022,7 +6549,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6041,7 +6568,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "docify", "frame-benchmarking", @@ -6059,7 +6586,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -6080,7 +6607,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6096,7 +6623,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6114,7 +6641,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6137,18 +6664,18 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "log", "sp-arithmetic", @@ -6157,7 +6684,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "sp-api", @@ -6166,7 +6693,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6183,7 +6710,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6198,7 +6725,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6216,7 +6743,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6235,7 +6762,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-support", "frame-system", @@ -6251,7 +6778,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6267,7 +6794,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6279,7 +6806,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6296,7 +6823,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6311,7 +6838,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6327,7 +6854,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6342,7 +6869,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-benchmarking", "frame-support", @@ -6405,9 +6932,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4890dcb9556136a4ec2b0c51fa4a08c8b733b829506af8fff2e853f3a065985b" +checksum = "78f19d20a0d2cc52327a88d131fa1c4ea81ea4a04714aedcfeca2dd410049cf8" dependencies = [ "blake2", "crc32fast", @@ -6425,9 +6952,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.1" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2287753623c76f953acd29d15d8100bcab84d29db78fb6f352adb3c53e83b967" +checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -6440,9 +6967,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.1" +version = "3.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b6937b5e67bfba3351b87b040d48352a2fcb6ad72f81855412ce97b45c8f110" +checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6450,6 +6977,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "parity-send-wrapper" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" + [[package]] name = "parity-wasm" version = "0.45.0" @@ -6458,9 +6991,9 @@ checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -6470,7 +7003,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", ] [[package]] @@ -6480,34 +7013,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.6", + "parking_lot_core 0.9.8", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.6" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.42.0", + "windows-targets 0.48.1", ] [[package]] @@ -6518,9 +7051,9 @@ checksum = "7924d1d0ad836f665c9065e26d016c673ece3993f30d340068b16f282afc1156" [[package]] name = "paste" -version = "1.0.7" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -6546,26 +7079,45 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] + +[[package]] +name = "pem-rfc7468" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.1.3" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" dependencies = [ + "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.1.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" dependencies = [ "pest", "pest_generator", @@ -6573,36 +7125,36 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.1.3" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" +checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] name = "pest_meta" -version = "2.1.3" 
+version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" +checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" dependencies = [ - "maplit", + "once_cell", "pest", - "sha-1 0.8.2", + "sha2 0.10.7", ] [[package]] name = "petgraph" -version = "0.6.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 1.9.3", ] [[package]] @@ -6622,7 +7174,7 @@ checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -6633,9 +7185,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" [[package]] name = "pin-utils" @@ -6643,21 +7195,31 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", +] + [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", - "spki", + "der 0.7.7", + "spki 0.7.2", ] [[package]] name = "pkg-config" -version = "0.3.22" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" @@ -6691,7 +7253,7 @@ name = "polkadot-approval-distribution" version = "0.9.43" dependencies = [ "assert_matches", - "env_logger 0.9.0", + "env_logger 0.9.3", "futures", "futures-timer", "log", @@ -6719,7 +7281,7 @@ version = "0.9.43" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.0", + "env_logger 0.9.3", "futures", "futures-timer", "log", @@ -6773,7 +7335,7 @@ name = "polkadot-availability-recovery" version = "0.9.43" dependencies = [ "assert_matches", - "env_logger 0.9.0", + "env_logger 0.9.3", "fatality", "futures", "futures-timer", @@ -6801,7 +7363,7 @@ dependencies = [ name = "polkadot-cli" version = "0.9.43" dependencies = [ - "clap 4.2.5", + "clap 4.3.19", "frame-benchmarking-cli", "futures", "log", @@ -6832,7 +7394,7 @@ dependencies = [ "always-assert", "assert_matches", "bitvec", - "env_logger 0.9.0", + "env_logger 0.9.3", "fatality", "futures", "futures-timer", @@ -6876,7 +7438,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "indexmap", + "indexmap 1.9.3", "lazy_static", "lru 0.11.0", "parity-scale-codec", @@ -7033,7 +7595,7 @@ version = "0.9.43" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.9.0", + "env_logger 0.9.3", "futures", 
"futures-timer", "kvdb", @@ -7394,7 +7956,7 @@ name = "polkadot-node-metrics" version = "0.9.43" dependencies = [ "assert_cmd", - "bs58 0.4.0", + "bs58", "futures", "futures-timer", "hyper", @@ -7517,7 +8079,7 @@ dependencies = [ "assert_matches", "async-trait", "derive_more", - "env_logger 0.9.0", + "env_logger 0.9.3", "fatality", "futures", "futures-channel", @@ -7597,7 +8159,7 @@ dependencies = [ name = "polkadot-performance-test" version = "0.9.43" dependencies = [ - "env_logger 0.9.0", + "env_logger 0.9.3", "kusama-runtime", "log", "polkadot-erasure-coding", @@ -7849,7 +8411,7 @@ dependencies = [ name = "polkadot-runtime-metrics" version = "0.9.43" dependencies = [ - "bs58 0.4.0", + "bs58", "frame-benchmarking", "parity-scale-codec", "polkadot-primitives", @@ -7862,7 +8424,7 @@ name = "polkadot-runtime-parachains" version = "0.9.43" dependencies = [ "assert_matches", - "bitflags", + "bitflags 1.3.2", "bitvec", "derive_more", "frame-benchmarking", @@ -7917,7 +8479,7 @@ version = "0.9.43" dependencies = [ "assert_matches", "async-trait", - "env_logger 0.9.0", + "env_logger 0.9.3", "frame-benchmarking", "frame-benchmarking-cli", "frame-support", @@ -8048,7 +8610,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "indexmap", + "indexmap 1.9.3", "parity-scale-codec", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -8114,7 +8676,7 @@ version = "0.9.43" dependencies = [ "assert_matches", "async-trait", - "clap 4.2.5", + "clap 4.3.19", "color-eyre", "futures", "futures-timer", @@ -8260,7 +8822,7 @@ dependencies = [ name = "polkadot-voter-bags" version = "0.9.43" dependencies = [ - "clap 4.2.5", + "clap 4.3.19", "generate-bags", "kusama-runtime", "polkadot-runtime", @@ -8270,15 +8832,18 @@ dependencies = [ [[package]] name = "polling" -version = "2.2.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ + "autocfg", + "bitflags 1.3.2", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "winapi", + "pin-project-lite 0.2.10", + "windows-sys 0.48.0", ] [[package]] @@ -8318,22 +8883,22 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "0.3.19" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f6a7b87c2e435a3241addceeeff740ff8b7e76b74c13bf9acb17fa454ea00b" +checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" [[package]] name = "pprof" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6472bfed9475542ac46c518734a8d06d71b0f6cb2c17f904aa301711a57786f" +checksum = "196ded5d4be535690899a4631cc9f18cdc41b7ebf24a79400f46f48e49a11059" dependencies = [ "backtrace", "cfg-if", "findshlibs", "libc", "log", - "nix 0.24.1", + "nix 0.26.2", "once_cell", "parking_lot 0.12.1", "smallvec", @@ -8350,9 +8915,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.0" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e5a7689e456ab905c22c2b48225bb921aba7c8dfa58440d68ba13f6222a715" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", @@ -8362,17 +8927,29 @@ dependencies = [ "regex", ] +[[package]] +name = "predicates" +version = 
"3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09963355b9f467184c04017ced4a2ba2d75cbcb4e7462690d388233253d4b1a9" +dependencies = [ + "anstyle", + "difflib", + "itertools", + "predicates-core", +] + [[package]] name = "predicates-core" -version = "1.0.2" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451" +checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" [[package]] name = "predicates-tree" -version = "1.0.4" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338c7be2905b732ae3984a2f40032b5e94fd8f52505b186c7d4d68d193445df7" +checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" dependencies = [ "predicates-core", "termtree", @@ -8380,24 +8957,32 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" +checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" dependencies = [ - "ctor", "diff", - "output_vt100", "yansi", ] [[package]] name = "prettyplease" -version = "0.2.4" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +dependencies = [ + "proc-macro2", + "syn 1.0.109", +] + +[[package]] +name = "prettyplease" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -8477,37 +9062,37 @@ checksum = "70550716265d1ec349c41f70dd4f964b4fd88394efe4405f0c1da679c4799a07" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "proc-macro2" -version = "1.0.60" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus" -version = "0.13.0" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "thiserror", ] [[package]] name = "prometheus-client" -version = "0.21.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" +checksum = "5d6fa99d535dd930d1249e6c79cb3c2915f9172a540fe2b02a4c8f9ca954721e" dependencies = [ "dtoa", "itoa", @@ -8528,40 +9113,31 @@ dependencies = [ [[package]] name = "prometheus-parse" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c996f3caea1c51aa034c0d2dfd8447a12c555f4567b02677ef8a865ac4cce712" +checksum = "0c2aa5feb83bf4b2c8919eaf563f51dbab41183de73ba2353c0e03cd7b6bd892" dependencies = [ "chrono", - 
"lazy_static", + "itertools", + "once_cell", "regex", ] [[package]] name = "prost" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" -dependencies = [ - "bytes", - "prost-derive 0.10.1", -] - -[[package]] -name = "prost" -version = "0.11.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", - "prost-derive 0.11.0", + "prost-derive", ] [[package]] name = "prost-build" -version = "0.11.1" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", @@ -8570,31 +9146,20 @@ dependencies = [ "log", "multimap", "petgraph", - "prost 0.11.0", + "prettyplease 0.1.25", + "prost", "prost-types", "regex", + "syn 1.0.109", "tempfile", "which", ] [[package]] name = "prost-derive" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "prost-derive" -version = "0.11.0" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", @@ -8605,35 +9170,34 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.1" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ - "bytes", - "prost 0.11.0", + "prost", ] [[package]] name = "psm" -version = "0.1.16" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69" +checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" dependencies = [ "cc", ] [[package]] name = "pyroscope" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6636d352280fb587c8716f10e1d61fe88cb002660e0a8b0d3e47de17f3b5aaed" +checksum = "91ce54d81c50f7fd6442ee671597f661a068ccebd82ed1557775b6791b14aba7" dependencies = [ "json", "libc", "libflate", "log", - "names", - "prost 0.10.4", + "names 0.14.0", + "prost", "reqwest", "thiserror", "url", @@ -8642,9 +9206,9 @@ dependencies = [ [[package]] name = "pyroscope_pprofrs" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e699bf3e7da41b3a7573d5944d77b1bd96a187aa72f5fa96afb4ed5609cc45" +checksum = "57add45daa57783490913a5d3d88e3249126971b61ac97ee0c7bac293ef0114a" dependencies = [ "log", "pprof", @@ -8658,12 +9222,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = 
"quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quick-protobuf" version = "0.8.1" @@ -8675,9 +9233,9 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +checksum = "1693116345026436eb2f10b677806169c1a1260c1c60eaaffe3fb5a29ae23d8b" dependencies = [ "asynchronous-codec", "bytes", @@ -8697,11 +9255,29 @@ dependencies = [ "pin-project-lite 0.1.12", ] +[[package]] +name = "quinn-proto" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31999cfc7927c4e212e60fd50934ab40e8e8bfd2d493d6095d2d306bc0764d9" +dependencies = [ + "bytes", + "rand 0.8.5", + "ring 0.16.20", + "rustc-hash", + "rustls 0.20.8", + "slab", + "thiserror", + "tinyvec", + "tracing", + "webpki 0.22.0", +] + [[package]] name = "quote" -version = "1.0.28" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2", ] @@ -8771,7 +9347,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.10", ] [[package]] @@ -8800,46 +9376,78 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ - "autocfg", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "lazy_static", "num_cpus", ] +[[package]] +name = "rcgen" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" +dependencies = [ + "pem", + "ring 0.16.20", + "time 0.3.25", + "x509-parser 0.13.2", + "yasna", +] + +[[package]] +name = "rcgen" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +dependencies = [ + "pem", + "ring 0.16.20", + "time 0.3.25", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ 
+ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", - "redox_syscall", + "getrandom 0.2.10", + "redox_syscall 0.2.16", + "thiserror", ] [[package]] @@ -8857,22 +9465,22 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.6" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +checksum = "61ef7e18e8841942ddb1cf845054f8008410030a3997875d9e49b7a363063df1" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.6" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +checksum = "2dfaf0c85b766276c797f3791f5bc6d5bd116b41d53049af2789666b0c0bc9fa" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] @@ -8889,13 +9497,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-automata 0.3.4", + "regex-syntax 0.7.4", ] [[package]] @@ -8904,20 +9513,37 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.7.4", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "remote-ext-tests-bags-list" version = "0.9.43" dependencies = [ - "clap 4.2.5", + "clap 4.3.19", "frame-system", "kusama-runtime", "kusama-runtime-constants", @@ -8933,21 +9559,12 @@ dependencies = [ ] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "reqwest" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "winapi", -] - -[[package]] -name = "reqwest" -version = "0.11.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13293b639a097af28fc8a90f22add145a9c954e49d77da06263d58cf44d5fb91" -dependencies = [ - "base64 0.21.0", + "base64 0.21.2", "bytes", "encoding_rs", "futures-core", @@ 
-8956,27 +9573,27 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls 0.23.0", + "hyper-rustls 0.24.1", "ipnet", "js-sys", "log", "mime", "once_cell", "percent-encoding", - "pin-project-lite 0.2.9", - "rustls 0.20.7", - "rustls-pemfile 1.0.2", + "pin-project-lite 0.2.10", + "rustls 0.21.6", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls 0.24.1", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.22.2", + "webpki-roots", "winreg 0.10.1", ] @@ -8987,7 +9604,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", - "quick-error 1.2.3", + "quick-error", +] + +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac 0.12.1", + "zeroize", ] [[package]] @@ -9030,15 +9658,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "ripemd" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -9164,14 +9783,26 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.0.0" +version = "7.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b763cb66df1c928432cc35053f8bd4cec3335d8559fc16010017d16b3c1680" +checksum = "6678cf63ab3491898c0d021b493c94c9b221d91295294a2a5746eacbe5928322" dependencies = [ "libc", + "rtoolbox", "winapi", ] +[[package]] +name = "rtcp" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" +dependencies = [ + "bytes", + "thiserror", + "webrtc-util", +] + [[package]] name = "rtnetlink" version = "0.10.1" @@ -9182,16 +9813,40 @@ dependencies = [ "log", "netlink-packet-route", "netlink-proto", - "nix 0.24.1", + "nix 0.24.3", "thiserror", "tokio", ] +[[package]] +name = "rtoolbox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "034e22c514f5c0cb8a10ff341b9b048b5ceb21591f31c8f44c43b960f9b3524a" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "rtp" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" +dependencies = [ + "async-trait", + "bytes", + "rand 0.8.5", + "serde", + "thiserror", + "webrtc-util", +] + [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -9211,110 +9866,122 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.16", + "semver 1.0.18", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", ] [[package]] name = "rustix" -version = "0.35.13" +version = "0.36.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9" +checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" dependencies = [ - "bitflags", - "errno 0.2.8", - "io-lifetimes 0.7.5", + "bitflags 1.3.2", + "errno", + "io-lifetimes", "libc", - "linux-raw-sys 0.0.46", - "windows-sys 0.42.0", + "linux-raw-sys 0.1.4", + "windows-sys 0.45.0", ] [[package]] name = "rustix" -version = "0.36.7" +version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fdebc4b395b7fbb9ab11e462e20ed9051e7b16e42d24042c776eca0ac81b03" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ - "bitflags", - "errno 0.2.8", - "io-lifetimes 1.0.10", + "bitflags 1.3.2", + "errno", + "io-lifetimes", "libc", - "linux-raw-sys 0.1.4", - "windows-sys 0.42.0", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", ] [[package]] name = "rustix" -version = "0.37.18" +version = "0.38.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bbfc1d1c7c40c01715f47d71444744a81669ca84e8b63e25a55e169b1f86433" +checksum = "1ee020b1716f0a80e2ace9b03441a749e402e86712f15f16fe8a8f75afac732f" dependencies = [ - "bitflags", - "errno 0.3.1", - "io-lifetimes 1.0.10", + "bitflags 2.3.3", + "errno", "libc", - "linux-raw-sys 0.3.6", + "linux-raw-sys 0.4.5", "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.20.7" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ + "base64 0.13.1", "log", "ring 0.16.20", - "sct", - "webpki", + "sct 0.6.1", + "webpki 0.21.4", ] [[package]] name = "rustls" -version = "0.21.2" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e32ca28af694bc1bbf399c33a516dbdf1c90090b8ab23c2bc24f834aa2247f5f" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring 0.16.20", - "rustls-webpki", - "sct", + "sct 0.7.0", + "webpki 0.22.0", ] [[package]] -name = "rustls-native-certs" -version = "0.6.1" +name = "rustls" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ - "openssl-probe", - "rustls-pemfile 0.2.1", - "schannel", - "security-framework", + "log", + "ring 0.16.20", + "rustls-webpki", + "sct 0.7.0", ] [[package]] -name = "rustls-pemfile" -version = "0.2.1" +name = "rustls-native-certs" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ - "base64 0.13.0", + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", ] [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.0", + "base64 0.21.2", ] [[package]] name = "rustls-webpki" -version = "0.100.1" +version = "0.101.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +checksum = "513722fd73ad80a71f72b61009ea1b584bcfa1483ca93949c8f290298837fa59" dependencies = [ "ring 0.16.20", "untrusted", @@ -9322,15 +9989,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rw-stream-sink" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" dependencies = [ "futures", "pin-project", @@ -9339,15 +10006,15 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.6" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c9613b5a66ab9ba26415184cfc41156594925a9cf3a2057e57f31ff145f6568" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "safe_arch" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "794821e4ccb0d9f979512f9c1973480123f9bd62a90d74ab0f9426fcf8f4a529" +checksum = "f398075ce1e6a179b46f51bd88d0598b92b00d3551f1a2d4ac49e771b56ac354" dependencies = [ "bytemuck", ] @@ -9364,7 +10031,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "log", "sp-core", @@ -9375,7 +10042,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "futures", @@ -9383,9 +10050,9 @@ dependencies = [ "ip_network", "libp2p", "log", - "multihash-codetable", + "multihash", "parity-scale-codec", - "prost 0.11.0", + "prost", "prost-build", "rand 0.8.5", "sc-client-api", @@ -9403,7 +10070,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "futures-timer", @@ -9426,7 +10093,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", 
"sc-client-api", @@ -9441,7 +10108,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -9460,27 +10127,27 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "chrono", - "clap 4.2.5", + "clap 4.3.19", "fdlimit", "futures", "libp2p-identity", "log", - "names", + "names 0.13.0", "parity-scale-codec", "rand 0.8.5", "regex", @@ -9510,7 +10177,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "fnv", "futures", @@ -9536,7 +10203,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "hash-db", "kvdb", @@ -9562,7 +10229,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "futures", @@ -9587,7 +10254,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "fork-tree", @@ -9623,7 +10290,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "jsonrpsee", @@ -9645,7 +10312,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "async-channel", @@ -9679,7 +10346,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version 
= "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "jsonrpsee", @@ -9698,7 +10365,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "fork-tree", "parity-scale-codec", @@ -9711,9 +10378,9 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", "array-bytes", "async-trait", "dyn-clone", @@ -9752,7 +10419,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "finality-grandpa", "futures", @@ -9772,7 +10439,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "futures", @@ -9795,7 +10462,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -9817,7 +10484,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -9829,13 +10496,13 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "anyhow", "cfg-if", "libc", "log", - "rustix 0.36.7", + "rustix 0.36.15", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -9846,7 +10513,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "ansi_term", "futures", @@ -9862,7 +10529,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -9876,7 +10543,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "async-channel", @@ -9889,7 +10556,6 @@ dependencies = [ "futures-timer", "ip_network", "libp2p", - "libp2p-kad", "linked_hash_set", "log", "mockall", @@ -9911,7 +10577,6 @@ dependencies = [ "substrate-prometheus-endpoint", "thiserror", "unsigned-varint", - "void", "wasm-timer", "zeroize", ] @@ -9919,14 +10584,14 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-channel", "cid", "futures", "libp2p-identity", "log", - "prost 0.11.0", + "prost", "prost-build", "sc-client-api", "sc-network", @@ -9939,10 +10604,10 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", - "bitflags", + "bitflags 1.3.2", "futures", "libp2p-identity", "parity-scale-codec", @@ -9956,14 +10621,13 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", "futures", "futures-timer", - "libp2p-identity", + "libp2p", "log", - "multiaddr", "sc-network", "sc-network-common", "schnellru", @@ -9975,7 +10639,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "async-channel", @@ -9983,7 +10647,7 @@ dependencies = [ "libp2p-identity", "log", "parity-scale-codec", - "prost 0.11.0", + "prost", "prost-build", "sc-client-api", "sc-network", @@ -9996,7 +10660,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "async-channel", @@ -10008,7 +10672,7 @@ dependencies = [ "log", "mockall", "parity-scale-codec", - "prost 0.11.0", + "prost", "prost-build", "sc-client-api", "sc-consensus", @@ -10030,7 +10694,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "futures", @@ -10048,7 +10712,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "bytes", @@ -10056,7 +10720,7 @@ dependencies = [ "futures", "futures-timer", "hyper", - "hyper-rustls 0.24.0", + "hyper-rustls 0.24.1", "libp2p", "log", "num_cpus", @@ -10082,7 +10746,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10091,7 +10755,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "jsonrpsee", @@ -10122,7 +10786,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10141,7 +10805,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "http", "jsonrpsee", @@ -10156,7 +10820,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "futures", @@ -10184,7 +10848,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "directories", @@ -10248,7 +10912,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "log", "parity-scale-codec", @@ -10259,9 +10923,9 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ - "clap 4.2.5", + "clap 4.3.19", "fs4", "log", "sc-client-db", @@ -10273,7 +10937,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10292,7 +10956,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "libc", @@ -10311,7 +10975,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "chrono", "futures", @@ -10330,7 +10994,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "ansi_term", "atty", @@ -10359,18 +11023,18 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "futures", @@ -10396,7 +11060,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "futures", @@ -10412,7 +11076,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-channel", "futures", @@ -10426,9 +11090,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.5.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" +checksum = "35c0a159d0c45c12b20c5a844feb1fe4bea86e28f17b92a5f0c42193634d3782" dependencies = [ "bitvec", "cfg-if", @@ -10440,9 +11104,9 @@ dependencies = [ [[package]] name = 
"scale-info-derive" -version = "2.5.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" +checksum = "912e55f6d20e0e80d63733872b40e1227c0bce1e1ab81ba67d696339bfd7fd29" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10452,12 +11116,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "lazy_static", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -10466,7 +11129,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", "cfg-if", "hashbrown 0.13.2", ] @@ -10491,15 +11154,25 @@ dependencies = [ [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scratch" -version = "1.0.2" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" + +[[package]] +name = "sct" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring 0.16.20", + "untrusted", +] [[package]] name = "sct" @@ -10511,34 +11184,60 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sdp" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" +dependencies = [ + "rand 0.8.5", + "substring", + "thiserror", + "url", +] + [[package]] name = "sec1" -version = "0.7.1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct 0.1.1", + "der 0.6.1", + "generic-array 0.14.7", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + +[[package]] +name = "sec1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48518a2b5775ba8ca5b46596aae011caa431e6ce7e4a67ead66d92f08884220e" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct", - "der", - "generic-array 0.14.6", - "pkcs8", + "base16ct 0.2.0", + "der 0.7.7", + "generic-array 0.14.7", + "pkcs8 0.10.2", "subtle", "zeroize", ] [[package]] name = "secp256k1" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9512ffd81e3a3503ed401f79c33168b9148c75038956039166cd750eaa037c3" +checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7058dc8eaf3f2810d7828680320acda0b25a288f6d288e19278e249bbf74226b" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" dependencies = [ "cc", ] @@ -10554,11 +11253,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.4.2" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -10567,9 +11266,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.4.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -10586,9 +11285,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" dependencies = [ "serde", ] @@ -10599,12 +11298,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "send_wrapper" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" - [[package]] name = "separator" version = "0.4.1" @@ -10613,38 +11306,38 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.164" +version = "1.0.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" +checksum = "0ea67f183f058fe88a4e3ec6e2788e003840893b91bac4559cabedd00863b3ed" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.164" +version = "1.0.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" +checksum = "24e744d7782b686ab3b73267ef05697159cc0e5abbed3f47f9933165e5219036" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "serde_fmt" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2963a69a2b3918c1dc75a45a18bd3fcd1120e31d3f59deb1b2f9b5d5ffb8baa4" +checksum = "e1d4ddca14104cd60529e8c7f7ba71a2c8acd8f7f5cfcdc2faf97eeb7c3010a4" dependencies = [ "serde", ] [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "itoa", "ryu", @@ -10653,9 +11346,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = 
"96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] @@ -10694,19 +11387,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", -] - -[[package]] -name = "sha-1" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "syn 2.0.28", ] [[package]] @@ -10724,9 +11405,20 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", @@ -10747,9 +11439,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", "cfg-if", @@ -10796,9 +11488,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook" -version = "0.3.14" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", "signal-hook-registry", @@ -10806,9 +11498,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -10830,6 +11522,10 @@ name = "signature" version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] [[package]] name = "signature" @@ -10843,9 +11539,9 @@ dependencies = [ [[package]] name = "simba" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50582927ed6f77e4ac020c057f37a268fc6aebc29225050365aacbb9deeeddc4" +checksum = "061507c94fc6ab4ba1c9a0305018408e312e17c041eb63bef8aa726fa33aceae" dependencies = [ "approx", "num-complex", @@ -10862,15 +11558,18 @@ checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" [[package]] name = "slab" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] [[package]] name = "slice-group-by" -version = 
"0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" +checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "slot-range-helper" @@ -10947,8 +11646,9 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "bytes", + "flate2", "futures", "http", "httparse", @@ -10960,7 +11660,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "hash-db", "log", @@ -10981,7 +11681,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "Inflector", "blake2", @@ -10989,13 +11689,13 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11008,7 +11708,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "integer-sqrt", "num-traits", @@ -11022,7 +11722,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11035,7 +11735,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "sp-api", "sp-inherents", @@ -11046,7 +11746,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "log", @@ -11064,7 +11764,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ 
"async-trait", "futures", @@ -11079,7 +11779,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11096,7 +11796,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11115,7 +11815,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11134,7 +11834,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "finality-grandpa", "log", @@ -11152,7 +11852,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11164,15 +11864,15 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "arrayvec 0.7.4", "bandersnatch_vrfs", - "bitflags", + "bitflags 1.3.2", "blake2", "bounded-collections", - "bs58 0.4.0", + "bs58", "dyn-clonable", "ed25519-zebra", "futures", @@ -11211,7 +11911,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "blake2b_simd", "byteorder", @@ -11224,17 +11924,17 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11243,17 +11943,17 
@@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "environmental", "parity-scale-codec", @@ -11264,7 +11964,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "serde_json", "sp-api", @@ -11275,7 +11975,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11289,7 +11989,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "bytes", "ed25519", @@ -11314,7 +12014,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "lazy_static", "sp-core", @@ -11325,7 +12025,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -11337,16 +12037,16 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "thiserror", - "zstd 0.12.3+zstd.1.5.2", + "zstd 0.12.4", ] [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -11357,7 +12057,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -11375,7 +12075,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11389,7 +12089,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "sp-api", "sp-core", @@ -11399,7 +12099,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "backtrace", "lazy_static", @@ -11409,7 +12109,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "rustc-hash", "serde", @@ -11419,7 +12119,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "either", "hash256-std-hasher", @@ -11441,7 +12141,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -11459,19 +12159,19 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "Inflector", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11486,7 +12186,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -11500,7 +12200,7 @@ dependencies = [ [[package]] name = 
"sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "hash-db", "log", @@ -11521,7 +12221,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -11545,12 +12245,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "impl-serde", "parity-scale-codec", @@ -11563,7 +12263,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11576,7 +12276,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "sp-std", @@ -11588,7 +12288,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "sp-api", "sp-runtime", @@ -11597,7 +12297,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "parity-scale-codec", @@ -11612,9 +12312,9 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ - "ahash 0.8.2", + "ahash 0.8.3", "hash-db", "hashbrown 0.13.2", "lazy_static", @@ -11635,7 +12335,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ 
"impl-serde", "parity-scale-codec", @@ -11652,18 +12352,18 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -11676,7 +12376,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "parity-scale-codec", "scale-info", @@ -11705,6 +12405,16 @@ dependencies = [ "strum", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.2" @@ -11712,14 +12422,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der", + "der 0.7.7", ] [[package]] name = "ss58-registry" -version = "1.36.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d92659e7d18d82b803824a9ba5a6022cff101c3491d027c1c1d8d30e749284" +checksum = "bfc443bad666016e012538782d9e3006213a7db43e9fb1dda91657dc06a6fa08" dependencies = [ "Inflector", "num-format", @@ -11741,7 +12451,7 @@ name = "staking-miner" version = "0.9.43" dependencies = [ "assert_cmd", - "clap 4.2.5", + "clap 4.3.19", "exitcode", "frame-election-provider-support", "frame-remote-externalities", @@ -11773,7 +12483,7 @@ dependencies = [ "sub-tokens", "thiserror", "tokio", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.17", "westend-runtime", ] @@ -11801,11 +12511,11 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg_aliases", "libc", "parking_lot 0.11.2", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", "static_init_macro 1.0.2", "winapi", ] @@ -11836,19 +12546,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "strobe-rs" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabb238a1cccccfa4c4fb703670c0d157e1256c1ba695abf1b93bd2bb14bab2d" -dependencies = [ - "bitflags", - "byteorder", - "keccak", - "subtle", - "zeroize", -] - [[package]] name = "strsim" version = "0.10.0" @@ -11866,9 +12563,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.24.0" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = 
[ "heck", "proc-macro2", @@ -11877,6 +12574,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "stun" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" +dependencies = [ + "base64 0.13.1", + "crc", + "lazy_static", + "md-5", + "rand 0.8.5", + "ring 0.16.20", + "subtle", + "thiserror", + "tokio", + "url", + "webrtc-util", +] + [[package]] name = "sub-tokens" version = "0.1.0" @@ -11894,19 +12610,19 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "schnorrkel", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -11925,7 +12641,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "hyper", "log", @@ -11937,7 +12653,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", "jsonrpsee", @@ -11950,7 +12666,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -11967,7 +12683,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "array-bytes", "async-trait", @@ -11993,7 +12709,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "futures", "substrate-test-utils-derive", @@ -12003,18 +12719,18 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "proc-macro-crate", 
"proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "ansi_term", "build-helper", @@ -12024,11 +12740,20 @@ dependencies = [ "sp-maybe-compressed-blob", "strum", "tempfile", - "toml 0.7.3", + "toml 0.7.6", "walkdir", "wasm-opt", ] +[[package]] +name = "substring" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ee6433ecef213b2e72f587ef64a2f5943e7cd16fbd82dbe8bc07486c534c86" +dependencies = [ + "autocfg", +] + [[package]] name = "subtle" version = "2.4.1" @@ -12105,9 +12830,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "9.2.1" +version = "10.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "800963ba330b09a2ae4a4f7c6392b81fbc2784099a98c1eac68c3437aa9382b2" +checksum = "1b55cdc318ede251d0957f07afe5fed912119b8c1bc5a7804151826db999e737" dependencies = [ "debugid", "memmap2", @@ -12117,11 +12842,11 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "9.2.1" +version = "10.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b940a1fdbc72bb3369e38714efe6cd332dbbe46d093cf03d668b9ac390d1ad0" +checksum = "79be897be8a483a81fff6a3a4e195b4ac838ef73ca42d348b3f722da9902e489" dependencies = [ - "cpp_demangle", + "cpp_demangle 0.4.2", "rustc-demangle", "symbolic-common", ] @@ -12139,9 +12864,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.20" +version = "2.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb8d4cebc40aa517dfb69618fa647a346562e67228e2236ae0042ee6ac14775" +checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567" dependencies = [ "proc-macro2", "quote", @@ -12162,11 +12887,11 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] @@ -12189,38 +12914,37 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.5" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" +checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" dependencies = [ "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "fastrand 2.0.0", + "redox_syscall 0.3.5", + "rustix 0.38.6", + "windows-sys 0.48.0", ] [[package]] name = "termcolor" -version = "1.1.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "termtree" -version = "0.2.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13a4ec180a2de59b57434704ccfad967f789b12737738798fa08798cd5824c16" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-parachain-adder" @@ -12239,7 +12963,7 @@ dependencies = [ name = "test-parachain-adder-collator" version = "0.9.43" dependencies = [ - "clap 4.2.5", + "clap 4.3.19", "futures", "futures-timer", "log", @@ -12288,7 +13012,7 @@ dependencies = [ name = "test-parachain-undying-collator" version = "0.9.43" dependencies = [ - "clap 4.2.5", + "clap 4.3.19", "futures", "futures-timer", "log", @@ -12343,22 +13067,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -12369,10 +13093,11 @@ checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if", "once_cell", ] @@ -12400,9 +13125,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e37706572f4b151dff7a0146e040804e9c26fe3a3118591112f05cf12a4216c1" +checksum = "619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c" dependencies = [ "libc", "paste", @@ -12411,20 +13136,19 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.5.2+5.3.0-patched" +version = "0.5.4+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" +checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" dependencies = [ "cc", - "fs_extra", "libc", ] [[package]] name = "tikv-jemallocator" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20612db8a13a6c06d57ec83953694185a367e16945f66565e8028d2c0bd76979" +checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -12432,15 +13156,43 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = 
"1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] +[[package]] +name = "time" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" +dependencies = [ + "deranged", + "itoa", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" + +[[package]] +name = "time-macros" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" +dependencies = [ + "time-core", +] + [[package]] name = "tiny-bip39" version = "1.0.0" @@ -12490,9 +13242,9 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" @@ -12507,7 +13259,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "signal-hook-registry", "socket2 0.4.9", "tokio-macros", @@ -12522,7 +13274,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -12538,13 +13290,13 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.2" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "rustls 0.20.7", + "rustls 0.20.8", "tokio", - "webpki", + "webpki 0.22.0", ] [[package]] @@ -12553,27 +13305,27 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.2", + "rustls 0.21.6", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tokio", "tokio-util", ] [[package]] name = "tokio-tungstenite" -version = "0.17.1" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", @@ -12583,15 +13335,15 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.1" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.9", 
+ "pin-project-lite 0.2.10", "tokio", "tracing", ] @@ -12607,9 +13359,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.3" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" dependencies = [ "serde", "serde_spanned", @@ -12619,20 +13371,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.8" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -12652,18 +13404,18 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" +checksum = "55ae70283aba8d2a8b411c695c437fe25b8b5e44e23e780662002fc72fb47a82" dependencies = [ - "bitflags", + "bitflags 2.3.3", "bytes", "futures-core", "futures-util", "http", "http-body", "http-range-header", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tower-layer", "tower-service", ] @@ -12676,9 +13428,9 @@ checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" @@ -12688,27 +13440,27 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.9", + "pin-project-lite 0.2.10", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.28", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -12744,7 +13496,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -12760,9 +13512,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +checksum = 
"bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", @@ -12793,13 +13545,13 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ - "ansi_term", - "lazy_static", "matchers 0.1.0", + "nu-ansi-term", + "once_cell", "regex", "sharded-slab", "smallvec", @@ -12879,17 +13631,17 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#8a2c2658a27fcf85c91c02c4f6769ceaebc53e4d" +source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" dependencies = [ "async-trait", - "clap 4.2.5", + "clap 4.3.19", "frame-remote-externalities", "frame-try-runtime", "hex", @@ -12916,15 +13668,16 @@ dependencies = [ "sp-version", "sp-weights", "substrate-rpc-client", - "zstd 0.12.3+zstd.1.5.2", + "zstd 0.12.4", ] [[package]] name = "trybuild" -version = "1.0.75" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1212c215a87a183687a7cc7065901b1a98da6b37277d51a1b5faedbb4efd4f3" +checksum = "a84e0202ea606ba5ebee8507ab2bfbe89b98551ed9b8f0be198109275cff284b" dependencies = [ + "basic-toml", "dissimilar", "glob", "once_cell", @@ -12932,34 +13685,52 @@ dependencies = [ "serde_derive", "serde_json", "termcolor", - "toml 0.5.11", ] [[package]] name = "tt-call" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" +checksum = "f4f195fd851901624eee5a58c4bb2b4f06399148fcd0ed336e6f1cb60a9881df" [[package]] name = "tungstenite" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", "log", "rand 0.8.5", - "sha-1 0.10.0", + "sha-1 0.10.1", "thiserror", "url", "utf-8", ] +[[package]] +name = "turn" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" +dependencies = [ + "async-trait", + "base64 0.13.1", + "futures", + "log", + "md-5", + "rand 0.8.5", + "ring 0.16.20", + "stun", + "thiserror", + "tokio", + "webrtc-util", +] + [[package]] name = "twox-hash" version = "1.6.3" @@ -12980,15 +13751,15 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uint" 
-version = "0.9.1" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6470ab50f482bde894a037a57064480a246dbfdd5960bd65a44824693f08da5f" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ "byteorder", "crunchy", @@ -12998,36 +13769,36 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.8" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" @@ -13035,7 +13806,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.6", + "generic-array 0.14.7", "subtle", ] @@ -13069,12 +13840,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", ] @@ -13092,9 +13863,12 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.2.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +dependencies = [ + "getrandom 0.2.10", +] [[package]] name = "valuable" @@ -13165,6 +13939,15 @@ dependencies = [ "libc", ] +[[package]] +name = "waitgroup" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1f50000a783467e6c0200f9d10642f4bc424e39efc1b770203e88b488f79292" +dependencies = [ + "atomic-waker", +] + [[package]] name = "waker-fn" version = "1.1.0" @@ -13173,22 +13956,20 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" 
[[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -13233,7 +14014,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", "wasm-bindgen-shared", ] @@ -13267,7 +14048,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -13289,9 +14070,9 @@ dependencies = [ [[package]] name = "wasm-opt" -version = "0.112.0" +version = "0.114.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fef6d0d508f08334e0ab0e6877feb4c0ecb3956bcf2cb950699b22fedf3e9c" +checksum = "4d005a95f934878a1fb446a816d51c3601a0120ff929005ba3bab3c749cfd1c7" dependencies = [ "anyhow", "libc", @@ -13305,9 +14086,9 @@ dependencies = [ [[package]] name = "wasm-opt-cxx-sys" -version = "0.112.0" +version = "0.114.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc816bbc1596c8f2e8127e137a760c798023ef3d378f2ae51f0f1840e2dfa445" +checksum = "6d04e240598162810fad3b2e96fa0dec6dba1eb65a03f3bd99a9248ab8b56caa" dependencies = [ "anyhow", "cxx", @@ -13317,9 +14098,9 @@ dependencies = [ [[package]] name = "wasm-opt-sys" -version = "0.112.0" +version = "0.114.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40199e4f68ef1071b3c6d0bd8026a12b481865d4b9e49c156932ea9a6234dd14" +checksum = "2efd2aaca519d64098c4faefc8b7433a97ed511caf4c9e516384eb6aef1ff4f9" dependencies = [ "anyhow", "cc", @@ -13348,7 +14129,7 @@ version = "0.102.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48134de3d7598219ab9eaf6b91b15d8e50d31da76b8519fe4ecfcec2cf35104b" dependencies = [ - "indexmap", + "indexmap 1.9.3", "url", ] @@ -13361,10 +14142,10 @@ dependencies = [ "anyhow", "bincode", "cfg-if", - "indexmap", + "indexmap 1.9.3", "libc", "log", - "object", + "object 0.30.4", "once_cell", "paste", "psm", @@ -13396,12 +14177,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c86437fa68626fe896e5afc69234bb2b5894949083586535f200385adfd71213" dependencies = [ "anyhow", - "base64 0.21.0", + "base64 0.21.2", "bincode", "directories-next", "file-per-thread-logger", "log", - "rustix 0.36.7", + "rustix 0.36.15", "serde", "sha2 0.10.7", "toml 0.5.11", @@ -13423,7 +14204,7 @@ dependencies = [ "cranelift-wasm", "gimli", "log", - "object", + "object 0.30.4", "target-lexicon", "thiserror", "wasmparser", @@ -13441,7 +14222,7 @@ dependencies = [ "cranelift-codegen", "cranelift-native", "gimli", - "object", + "object 0.30.4", "target-lexicon", "wasmtime-environ", ] @@ -13455,9 +14236,9 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli", - "indexmap", + "indexmap 1.9.3", "log", - "object", + "object 0.30.4", "serde", "target-lexicon", "thiserror", @@ -13471,14 +14252,14 @@ version = "8.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0de48df552cfca1c9b750002d3e07b45772dd033b0b206d5c0968496abf31244" dependencies = [ - "addr2line", + "addr2line 0.19.0", "anyhow", "bincode", "cfg-if", - "cpp_demangle", + "cpp_demangle 0.3.5", "gimli", "log", - "object", + "object 0.30.4", "rustc-demangle", "serde", "target-lexicon", @@ -13495,9 +14276,9 @@ version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e0554b84c15a27d76281d06838aed94e13a77d7bf604bbbaf548aa20eb93846" dependencies = [ - "object", + "object 0.30.4", "once_cell", - "rustix 0.36.7", + "rustix 0.36.15", ] [[package]] @@ -13520,7 +14301,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap", + "indexmap 1.9.3", "libc", "log", "mach", @@ -13528,7 +14309,7 @@ dependencies = [ "memoffset 0.8.0", "paste", "rand 0.8.5", - "rustix 0.36.7", + "rustix 0.36.15", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", @@ -13549,14 +14330,24 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring 0.16.20", + "untrusted", +] + [[package]] name = "webpki" version = "0.22.0" @@ -13569,29 +14360,220 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.2" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ - "webpki", + "webpki 0.22.0", ] [[package]] -name = "webpki-roots" -version = "0.23.1" +name = "webrtc" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" +checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" dependencies = [ - "rustls-webpki", + "arc-swap", + "async-trait", + "bytes", + "hex", + "interceptor", + "lazy_static", + "log", + "rand 0.8.5", + "rcgen 0.9.3", + "regex", + "ring 0.16.20", + "rtcp", + "rtp", + "rustls 0.19.1", + "sdp", + "serde", + "serde_json", + "sha2 0.10.7", + "stun", + "thiserror", + "time 0.3.25", + "tokio", + "turn", + "url", + "waitgroup", + "webrtc-data", + "webrtc-dtls", + "webrtc-ice", + "webrtc-mdns", + "webrtc-media", + "webrtc-sctp", + "webrtc-srtp", + "webrtc-util", ] [[package]] -name = "wepoll-ffi" -version = "0.1.2" +name = "webrtc-data" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" +dependencies = [ + "bytes", + "derive_builder", + "log", + "thiserror", + "tokio", + "webrtc-sctp", + "webrtc-util", +] + +[[package]] +name = "webrtc-dtls" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" +checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ + "aes 0.6.0", + "aes-gcm 0.10.2", + 
"async-trait", + "bincode", + "block-modes", + "byteorder", + "ccm", + "curve25519-dalek 3.2.0", + "der-parser 8.2.0", + "elliptic-curve 0.12.3", + "hkdf", + "hmac 0.12.1", + "log", + "oid-registry 0.6.1", + "p256", + "p384", + "rand 0.8.5", + "rand_core 0.6.4", + "rcgen 0.9.3", + "ring 0.16.20", + "rustls 0.19.1", + "sec1 0.3.0", + "serde", + "sha1", + "sha2 0.10.7", + "signature 1.6.4", + "subtle", + "thiserror", + "tokio", + "webpki 0.21.4", + "webrtc-util", + "x25519-dalek 2.0.0-pre.1", + "x509-parser 0.13.2", +] + +[[package]] +name = "webrtc-ice" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "465a03cc11e9a7d7b4f9f99870558fe37a102b65b93f8045392fef7c67b39e80" +dependencies = [ + "arc-swap", + "async-trait", + "crc", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "stun", + "thiserror", + "tokio", + "turn", + "url", + "uuid", + "waitgroup", + "webrtc-mdns", + "webrtc-util", +] + +[[package]] +name = "webrtc-mdns" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" +dependencies = [ + "log", + "socket2 0.4.9", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-media" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" +dependencies = [ + "byteorder", + "bytes", + "rand 0.8.5", + "rtp", + "thiserror", +] + +[[package]] +name = "webrtc-sctp" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" +dependencies = [ + "arc-swap", + "async-trait", + "bytes", + "crc", + "log", + "rand 0.8.5", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-srtp" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" +dependencies = [ + "aead 0.4.3", + "aes 0.7.5", + "aes-gcm 0.9.4", + "async-trait", + "byteorder", + "bytes", + "ctr 0.8.0", + "hmac 0.11.0", + "log", + "rtcp", + "rtp", + "sha-1 0.9.8", + "subtle", + "thiserror", + "tokio", + "webrtc-util", +] + +[[package]] +name = "webrtc-util" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" +dependencies = [ + "async-trait", + "bitflags 1.3.2", + "bytes", "cc", + "ipnet", + "lazy_static", + "libc", + "log", + "nix 0.24.3", + "rand 0.8.5", + "thiserror", + "tokio", + "winapi", ] [[package]] @@ -13707,20 +14689,20 @@ dependencies = [ [[package]] name = "which" -version = "4.2.2" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" +checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" dependencies = [ "either", - "lazy_static", "libc", + "once_cell", ] [[package]] name = "wide" -version = "0.7.6" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feff0a412894d67223777b6cc8d68c0dab06d52d95e9890d5f2d47f10dd9366c" +checksum = "aa469ffa65ef7e0ba0f164183697b89b854253fd31aeb92358b7b6155177d62f" dependencies = [ "bytemuck", "safe_arch", @@ -13728,9 +14710,9 @@ dependencies = [ [[package]] name = "widestring" -version = "0.5.1" +version 
= "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "winapi" @@ -13777,31 +14759,12 @@ dependencies = [ ] [[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" +name = "windows" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.48.1", ] [[package]] @@ -13819,7 +14782,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.1", ] [[package]] @@ -13839,9 +14802,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", @@ -13870,12 +14833,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -13894,12 +14851,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -13918,12 +14869,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -13942,12 +14887,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" -[[package]] -name = 
"windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -13978,12 +14917,6 @@ version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -13998,29 +14931,30 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deac0939bd6e4f24ab5919fbf751c97a8cfc8543bb083a305ed5c0c10bb241d1" +checksum = "f46aab759304e4d7b2075a9aecba26228bb073ee8c50db796b2c72c676b5d807" dependencies = [ "memchr", ] [[package]] name = "winreg" -version = "0.7.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" dependencies = [ "winapi", ] [[package]] name = "winreg" -version = "0.10.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "winapi", + "cfg-if", + "windows-sys 0.48.0", ] [[package]] @@ -14054,6 +14988,43 @@ dependencies = [ "zeroize", ] +[[package]] +name = "x509-parser" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" +dependencies = [ + "asn1-rs 0.3.1", + "base64 0.13.1", + "data-encoding", + "der-parser 7.0.0", + "lazy_static", + "nom", + "oid-registry 0.4.0", + "ring 0.16.20", + "rusticata-macros", + "thiserror", + "time 0.3.25", +] + +[[package]] +name = "x509-parser" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" +dependencies = [ + "asn1-rs 0.5.2", + "base64 0.13.1", + "data-encoding", + "der-parser 8.2.0", + "lazy_static", + "nom", + "oid-registry 0.6.1", + "rusticata-macros", + "thiserror", + "time 0.3.25", +] + [[package]] name = "xcm" version = "0.9.43" @@ -14145,7 +15116,7 @@ dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.20", + "syn 2.0.28", ] [[package]] @@ -14238,6 +15209,15 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time 0.3.25", +] + [[package]] name = "zeroize" version = "1.6.0" @@ -14249,14 +15229,13 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", - "synstructure", + "syn 2.0.28", ] [[package]] @@ -14287,11 +15266,11 @@ dependencies = [ [[package]] name = "zstd" -version = "0.12.3+zstd.1.5.2" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806" +checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" dependencies = [ - "zstd-safe 6.0.5+zstd.1.5.4", + "zstd-safe 6.0.6", ] [[package]] @@ -14306,9 +15285,9 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "6.0.5+zstd.1.5.4" +version = "6.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" dependencies = [ "libc", "zstd-sys", diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml index a42e449f09f9..f31c7d0a1694 100644 --- a/node/core/approval-voting/Cargo.toml +++ b/node/core/approval-voting/Cargo.toml @@ -11,7 +11,7 @@ futures-timer = "3.0.2" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } gum = { package = "tracing-gum", path = "../../gum" } bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -lru = "0.11" +lru = "0.11.0" merlin = "2.0" schnorrkel = "0.9.1" kvdb = "0.13.0" diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml index 22b4a96e60e8..30ba781216d0 100644 --- a/node/core/runtime-api/Cargo.toml +++ b/node/core/runtime-api/Cargo.toml @@ -8,7 +8,7 @@ license.workspace = true [dependencies] futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } -lru = "0.11" +lru = "0.11.0" sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/gum/proc-macro/Cargo.toml b/node/gum/proc-macro/Cargo.toml index 61f31beb61f3..e7262008499b 100644 --- a/node/gum/proc-macro/Cargo.toml +++ b/node/gum/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.20", features = ["full", "extra-traits"] } +syn = { version = "2.0.15", features = ["full", "extra-traits"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index d4d1df3da467..e89b38544684 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -157,7 +157,7 @@ impl Service { fn extract_peer_ids(multiaddr: impl Iterator) -> HashSet { multiaddr .filter_map(|mut addr| match addr.pop() { - Some(multiaddr::Protocol::P2p(peer_id)) => Some(peer_id), + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key).ok(), _ => None, }) .collect() @@ -208,7 +208,7 @@ mod tests { let authorities = known_authorities(); let multiaddr = known_multiaddr().into_iter().zip(peer_ids.iter().cloned()).map( |(mut addr, peer_id)| { - addr.push(multiaddr::Protocol::P2p(peer_id)); + addr.push(multiaddr::Protocol::P2p(peer_id.into())); HashSet::from([addr]) }, ); diff --git a/node/network/gossip-support/src/tests.rs b/node/network/gossip-support/src/tests.rs index 
72bc7ded251a..5f91fcf52147 100644 --- a/node/network/gossip-support/src/tests.rs +++ b/node/network/gossip-support/src/tests.rs @@ -106,7 +106,7 @@ impl MockAuthorityDiscovery { .clone() .into_iter() .map(|(p, a)| { - let multiaddr = Multiaddr::empty().with(Protocol::P2p(p)); + let multiaddr = Multiaddr::empty().with(Protocol::P2p(p.into())); (a, HashSet::from([multiaddr])) }) .collect(); @@ -566,11 +566,11 @@ fn test_log_output() { let unconnected_authorities = { let mut m = HashMap::new(); let peer_id = PeerId::random(); - let addr = Multiaddr::empty().with(Protocol::P2p(peer_id)); + let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into())); let addrs = HashSet::from([addr.clone(), addr]); m.insert(alice, addrs); let peer_id = PeerId::random(); - let addr = Multiaddr::empty().with(Protocol::P2p(peer_id)); + let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into())); let addrs = HashSet::from([addr.clone(), addr]); m.insert(bob, addrs); m diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index b0576f5c61ef..2e601a46a744 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -18,7 +18,7 @@ polkadot-node-metrics = { path = "../metrics" } polkadot-primitives = { path = "../../primitives" } orchestra = "0.0.5" gum = { package = "tracing-gum", path = "../gum" } -lru = "0.11" +lru = "0.11.0" sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } async-trait = "0.1.57" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index e373dd4f0011..48e4633aa5e3 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -87,7 +87,7 @@ parity-db = { version = "0.4.8", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } async-trait = "0.1.57" -lru = "0.11" +lru = "0.11.0" log = "0.4.17" is_executable = "1.0.1" diff --git a/node/subsystem-types/Cargo.toml b/node/subsystem-types/Cargo.toml index d994682110e5..1fb9ac83b780 100644 --- a/node/subsystem-types/Cargo.toml +++ b/node/subsystem-types/Cargo.toml @@ -20,7 +20,7 @@ sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -smallvec = "1.11.0" +smallvec = "1.8.0" substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } thiserror = "1.0.31" async-trait = "0.1.57" diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index 645215bc2c22..28598bde9443 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -15,7 +15,7 @@ rustc-hex = { version = "2.1.0", default-features = false } serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" -smallvec = "1.11.0" +smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/runtime/kusama/constants/Cargo.toml b/runtime/kusama/constants/Cargo.toml index 11ff70cd8c9b..293c91bbd543 100644 --- 
a/runtime/kusama/constants/Cargo.toml +++ b/runtime/kusama/constants/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license.workspace = true [dependencies] -smallvec = "1.11.0" +smallvec = "1.8.0" frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index 0ab06b8bbb12..5d343811fb14 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -15,7 +15,7 @@ rustc-hex = { version = "2.1.0", default-features = false } serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" -smallvec = "1.11.0" +smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/runtime/polkadot/constants/Cargo.toml b/runtime/polkadot/constants/Cargo.toml index a10546edfa7b..42a9c685ea82 100644 --- a/runtime/polkadot/constants/Cargo.toml +++ b/runtime/polkadot/constants/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license.workspace = true [dependencies] -smallvec = "1.11.0" +smallvec = "1.8.0" frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index f1f0d1cbe729..95791edda710 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -13,7 +13,7 @@ log = { version = "0.4.17", default-features = false } serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" -smallvec = "1.11.0" +smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/runtime/rococo/constants/Cargo.toml b/runtime/rococo/constants/Cargo.toml index f9ea1186c550..e7bc81f199a1 100644 --- a/runtime/rococo/constants/Cargo.toml +++ b/runtime/rococo/constants/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license.workspace = true [dependencies] -smallvec = "1.11.0" +smallvec = "1.8.0" frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index 41fbebb39f3a..9dcd8fe83a27 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -15,7 +15,7 @@ rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } -smallvec = "1.11.0" +smallvec = "1.8.0" 
authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/runtime/test-runtime/constants/Cargo.toml b/runtime/test-runtime/constants/Cargo.toml index 15ab1dbdd4fe..84d8ae8ce560 100644 --- a/runtime/test-runtime/constants/Cargo.toml +++ b/runtime/test-runtime/constants/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true license.workspace = true [dependencies] -smallvec = "1.11.0" +smallvec = "1.8.0" frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index e665a08b1ed1..79583fc2fb1e 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -14,7 +14,7 @@ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } serde = { version = "1.0.163", default-features = false } serde_derive = { version = "1.0.117", optional = true } -smallvec = "1.11.0" +smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/runtime/westend/constants/Cargo.toml b/runtime/westend/constants/Cargo.toml index e5d9900e22e2..63e9ad34a7f2 100644 --- a/runtime/westend/constants/Cargo.toml +++ b/runtime/westend/constants/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license.workspace = true [dependencies] -smallvec = "1.11.0" +smallvec = "1.8.0" frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } diff --git a/xcm/procedural/Cargo.toml b/xcm/procedural/Cargo.toml index b60c8eed6151..a821a73669e0 100644 --- a/xcm/procedural/Cargo.toml +++ b/xcm/procedural/Cargo.toml @@ -11,5 +11,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.20" +syn = "2.0.15" Inflector = "0.11.4" From 2157c2f4c1bdec4f1ad88c169103b3bb21fac2f0 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 16 Aug 2023 14:20:31 +0200 Subject: [PATCH 21/27] Fix clippy warnings (#7625) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix clippy check Signed-off-by: Oliver Tale-Yazdi * Autofix clippy Signed-off-by: Oliver Tale-Yazdi * Fix trivial Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * suppress warnings Signed-off-by: Oliver Tale-Yazdi * Quiet clippy 😌 Signed-off-by: Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi --- node/collation-generation/src/tests.rs | 2 +- node/core/av-store/src/tests.rs | 2 +- node/core/backing/src/tests.rs | 8 - node/core/chain-selection/src/tests.rs | 52 +- .../src/participation/queues/tests.rs | 2 +- node/core/dispute-coordinator/src/tests.rs | 22 +- .../disputes/prioritized_selection/tests.rs | 66 +-- node/core/pvf-checker/src/tests.rs | 16 +- node/core/pvf/tests/it/adder.rs | 9 +- 
node/core/runtime-api/src/tests.rs | 8 +- .../src/variants/back_garbage_candidate.rs | 4 +- node/malus/src/variants/common.rs | 5 +- .../src/variants/dispute_valid_candidates.rs | 4 +- .../src/variants/suggest_garbage_candidate.rs | 6 +- .../approval-distribution/src/tests.rs | 42 +- .../src/tests/state.rs | 11 +- .../availability-recovery/src/tests.rs | 32 +- .../bitfield-distribution/src/tests.rs | 163 +++--- node/network/bridge/src/rx/tests.rs | 132 ++--- node/network/bridge/src/tx/tests.rs | 12 +- .../network/bridge/src/validator_discovery.rs | 4 +- .../src/collator_side/tests.rs | 81 +-- .../src/validator_side/tests.rs | 100 ++-- .../dispute-distribution/src/tests/mock.rs | 2 +- node/network/gossip-support/src/tests.rs | 8 +- .../statement-distribution/src/tests.rs | 131 +++-- node/service/src/tests.rs | 16 +- node/test/service/src/lib.rs | 8 +- runtime/common/src/auctions.rs | 8 +- runtime/common/src/crowdloan/mod.rs | 6 +- runtime/common/src/xcm_sender.rs | 14 +- runtime/parachains/src/builder.rs | 22 +- runtime/parachains/src/disputes/tests.rs | 488 ++++++------------ runtime/parachains/src/inclusion/tests.rs | 5 +- runtime/parachains/src/paras/mod.rs | 2 +- runtime/parachains/src/paras/tests.rs | 13 - .../parachains/src/paras_inherent/tests.rs | 32 +- runtime/parachains/src/scheduler/tests.rs | 82 +-- runtime/parachains/src/session_info/tests.rs | 5 +- runtime/parachains/src/ump_tests.rs | 6 +- scripts/ci/gitlab/pipeline/test.yml | 3 +- statement-table/src/generic.rs | 8 +- xcm/pallet-xcm/src/tests.rs | 111 ++-- xcm/xcm-builder/src/asset_conversion.rs | 4 +- xcm/xcm-builder/src/tests/assets.rs | 5 +- xcm/xcm-builder/src/tests/bridging/mod.rs | 6 +- .../tests/bridging/paid_remote_relay_relay.rs | 4 +- xcm/xcm-builder/src/tests/mock.rs | 16 +- xcm/xcm-builder/src/tests/transacting.rs | 6 +- .../src/tests/version_subscriptions.rs | 4 +- xcm/xcm-simulator/fuzzer/src/parachain.rs | 4 +- 51 files changed, 729 insertions(+), 1073 deletions(-) diff --git a/node/collation-generation/src/tests.rs b/node/collation-generation/src/tests.rs index 1c98e1450941..09e5e88c221c 100644 --- a/node/collation-generation/src/tests.rs +++ b/node/collation-generation/src/tests.rs @@ -353,7 +353,7 @@ mod handle_new_activations { let expect_descriptor = { let mut expect_descriptor = expect_descriptor; expect_descriptor.signature = descriptor.signature.clone(); - expect_descriptor.erasure_root = descriptor.erasure_root.clone(); + expect_descriptor.erasure_root = descriptor.erasure_root; expect_descriptor }; assert_eq!(descriptor, expect_descriptor); diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs index f8e30210c7c2..dbccf1401582 100644 --- a/node/core/av-store/src/tests.rs +++ b/node/core/av-store/src/tests.rs @@ -53,7 +53,7 @@ struct TestClock { impl TestClock { fn now(&self) -> Duration { - self.inner.lock().clone() + *self.inner.lock() } fn inc(&self, by: Duration) { diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 386cc9e2279e..4be7516c58b4 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -283,7 +283,6 @@ fn backing_second_works() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), - ..Default::default() } .build(); @@ -373,7 +372,6 @@ fn backing_works() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), - ..Default::default() } .build(); @@ -522,7 +520,6 @@ fn 
backing_works_while_validation_ongoing() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), - ..Default::default() } .build(); @@ -699,7 +696,6 @@ fn backing_misbehavior_works() { pov_hash, erasure_root: make_erasure_root(&test_state, pov.clone()), head_data: expected_head_data.clone(), - ..Default::default() } .build(); @@ -884,7 +880,6 @@ fn backing_dont_second_invalid() { pov_hash: pov_hash_b, erasure_root: make_erasure_root(&test_state, pov_block_b.clone()), head_data: expected_head_data.clone(), - ..Default::default() } .build(); @@ -1215,7 +1210,6 @@ fn backing_doesnt_second_wrong_collator() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), - ..Default::default() } .build(); @@ -1267,7 +1261,6 @@ fn validation_work_ignores_wrong_collator() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), - ..Default::default() } .build(); @@ -1541,7 +1534,6 @@ fn observes_backing_even_if_not_validator() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), - ..Default::default() } .build(); diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 8b475bd2e13c..c04f9aaf6606 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -568,7 +568,7 @@ fn assert_backend_contains<'a>( fn assert_backend_contains_chains(backend: &TestBackend, chains: Vec>) { for chain in chains { - assert_backend_contains(backend, chain.iter().map(|&(ref hdr, _)| hdr)) + assert_backend_contains(backend, chain.iter().map(|(hdr, _)| hdr)) } } @@ -688,7 +688,7 @@ fn import_chain_on_finalized_incrementally() { .await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); - assert_backend_contains(&backend, chain.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![head_hash]); assert_leaves_query(&mut virtual_overseer, vec![head_hash]).await; @@ -721,8 +721,8 @@ fn import_two_subtrees_on_finalized() { import_blocks_into(&mut virtual_overseer, &backend, None, chain_b.clone()).await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![b_hash, a_hash]); assert_leaves_query(&mut virtual_overseer, vec![b_hash, a_hash]).await; @@ -755,8 +755,8 @@ fn import_two_subtrees_on_nonzero_finalized() { import_blocks_into(&mut virtual_overseer, &backend, None, chain_b.clone()).await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 101); - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![b_hash, a_hash]); assert_leaves_query(&mut virtual_overseer, vec![b_hash, a_hash]).await; @@ -799,9 +799,9 @@ fn leaves_ordered_by_weight_and_then_number() { .await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); - 
assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_c.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_c.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![c2_hash, a3_hash, b2_hash]); assert_leaves_query(&mut virtual_overseer, vec![c2_hash, a3_hash, b2_hash]).await; virtual_overseer @@ -844,8 +844,8 @@ fn subtrees_imported_even_with_gaps() { .await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![b5_hash, a3_hash]); assert_leaves_query(&mut virtual_overseer, vec![b5_hash, a3_hash]).await; @@ -878,7 +878,7 @@ fn reversion_removes_viability_of_chain() { ) .await; - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![]); assert_finalized_leaves_query(&mut virtual_overseer, finalized_number, finalized_hash) .await; @@ -914,7 +914,7 @@ fn reversion_removes_viability_and_finds_ancestor_as_leaf() { ) .await; - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![a1_hash]); assert_leaves_query(&mut virtual_overseer, vec![a1_hash]).await; @@ -954,15 +954,15 @@ fn ancestor_of_unviable_is_not_leaf_if_has_children() { import_blocks_into(&mut virtual_overseer, &backend, None, chain_b.clone()).await; - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![a2_hash, b2_hash]); import_blocks_into(&mut virtual_overseer, &backend, None, chain_a_ext.clone()).await; - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_a_ext.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_a_ext.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![b2_hash]); assert_leaves_query(&mut virtual_overseer, vec![b2_hash]).await; @@ -995,7 +995,7 @@ fn self_and_future_reversions_are_ignored() { ) .await; - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![a3_hash]); assert_leaves_query(&mut virtual_overseer, vec![a3_hash]).await; @@ -1028,7 +1028,7 @@ fn revert_finalized_is_ignored() { ) .await; - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![a3_hash]); assert_leaves_query(&mut virtual_overseer, 
vec![a3_hash]).await; @@ -1072,8 +1072,8 @@ fn reversion_affects_viability_of_all_subtrees() { import_blocks_into(&mut virtual_overseer, &backend, None, chain_b.clone()).await; - assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); - assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![a1_hash]); assert_leaves_query(&mut virtual_overseer, vec![a1_hash]).await; @@ -2034,12 +2034,12 @@ fn revert_blocks_message_triggers_proper_reversion() { .await; // Checking mini chain - assert_backend_contains(&backend, built_chain.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, built_chain.iter().map(|(h, _)| h)); assert_leaves(&backend, vec![head_hash]); assert_leaves_query(&mut virtual_overseer, vec![head_hash]).await; - let block_1_hash = backend.load_blocks_by_number(1).unwrap().get(0).unwrap().clone(); - let block_2_hash = backend.load_blocks_by_number(2).unwrap().get(0).unwrap().clone(); + let block_1_hash = *backend.load_blocks_by_number(1).unwrap().get(0).unwrap(); + let block_2_hash = *backend.load_blocks_by_number(2).unwrap().get(0).unwrap(); // Sending revert blocks message let (_, write_rx) = backend.await_next_write(); @@ -2098,7 +2098,7 @@ fn revert_blocks_against_finalized_is_ignored() { .await; // Checking mini chain - assert_backend_contains(&backend, built_chain.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, built_chain.iter().map(|(h, _)| h)); // Sending dispute concluded against message virtual_overseer diff --git a/node/core/dispute-coordinator/src/participation/queues/tests.rs b/node/core/dispute-coordinator/src/participation/queues/tests.rs index 5e262d895e31..d4f43639ce87 100644 --- a/node/core/dispute-coordinator/src/participation/queues/tests.rs +++ b/node/core/dispute-coordinator/src/participation/queues/tests.rs @@ -44,7 +44,7 @@ fn make_dummy_comparator( fn clone_request(request: &ParticipationRequest) -> ParticipationRequest { ParticipationRequest { candidate_receipt: request.candidate_receipt.clone(), - candidate_hash: request.candidate_hash.clone(), + candidate_hash: request.candidate_hash, session: request.session, request_timer: None, } diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index d0cf494d2d4d..75eae8200dc6 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -399,10 +399,10 @@ impl TestState { let mut response = Vec::new(); for i in target_header.number.saturating_sub(k as u32)..target_header.number { response.push( - self.block_num_to_header + *self + .block_num_to_header .get(&i) - .expect("headers and block_num_to_header should always be in sync") - .clone(), + .expect("headers and block_num_to_header should always be in sync"), ); } let _ = response_channel.send(Ok(response)); @@ -552,7 +552,7 @@ impl TestState { let (ctx, ctx_handle) = make_buffered_subsystem_context(TaskExecutor::new(), 1); let subsystem = DisputeCoordinatorSubsystem::new( self.db.clone(), - self.config.clone(), + self.config, self.subsystem_keystore.clone(), Metrics::default(), ); @@ -574,27 +574,27 @@ where // Add two more blocks after the genesis (which is created in `default()`) let h1 = Header { - parent_hash: test_state.last_block.clone(), + parent_hash: test_state.last_block, number: 1, digest: dummy_digest(), state_root: dummy_hash(), 
extrinsics_root: dummy_hash(), }; let h1_hash = h1.hash(); - test_state.headers.insert(h1_hash.clone(), h1); - test_state.block_num_to_header.insert(1, h1_hash.clone()); + test_state.headers.insert(h1_hash, h1); + test_state.block_num_to_header.insert(1, h1_hash); test_state.last_block = h1_hash; let h2 = Header { - parent_hash: test_state.last_block.clone(), + parent_hash: test_state.last_block, number: 2, digest: dummy_digest(), state_root: dummy_hash(), extrinsics_root: dummy_hash(), }; let h2_hash = h2.hash(); - test_state.headers.insert(h2_hash.clone(), h2); - test_state.block_num_to_header.insert(2, h2_hash.clone()); + test_state.headers.insert(h2_hash, h2); + test_state.block_num_to_header.insert(2, h2_hash); test_state.last_block = h2_hash; test_state.resume(test) @@ -3133,7 +3133,7 @@ fn participation_requests_reprioritized_for_newly_included() { // participation. let parent_block_num: BlockNumber = repetition as BlockNumber - 1; candidate_receipt.descriptor.relay_parent = - test_state.block_num_to_header.get(&parent_block_num).unwrap().clone(); + *test_state.block_num_to_header.get(&parent_block_num).unwrap(); receipts.push(candidate_receipt.clone()); } diff --git a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs index 7798ebe51aaf..2fdeadb2f4f0 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/tests.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/tests.rs @@ -136,16 +136,16 @@ fn partitioning_happy_case() { CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(time_now - ACTIVE_DURATION_SECS * 2), ); - input.push(inactive_unknown_onchain.clone()); + input.push(inactive_unknown_onchain); let inactive_unconcluded_onchain = ( 1, CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(time_now - ACTIVE_DURATION_SECS * 2), ); - input.push(inactive_unconcluded_onchain.clone()); + input.push(inactive_unconcluded_onchain); onchain.insert( - (inactive_unconcluded_onchain.0, inactive_unconcluded_onchain.1.clone()), + (inactive_unconcluded_onchain.0, inactive_unconcluded_onchain.1), DisputeState { validators_for: bitvec![u8, Lsb0; 1, 1, 1, 0, 0, 0, 0, 0, 0], validators_against: bitvec![u8, Lsb0; 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -155,12 +155,12 @@ fn partitioning_happy_case() { ); let active_unknown_onchain = (2, CandidateHash(Hash::random()), DisputeStatus::Active); - input.push(active_unknown_onchain.clone()); + input.push(active_unknown_onchain); let active_unconcluded_onchain = (3, CandidateHash(Hash::random()), DisputeStatus::Active); - input.push(active_unconcluded_onchain.clone()); + input.push(active_unconcluded_onchain); onchain.insert( - (active_unconcluded_onchain.0, active_unconcluded_onchain.1.clone()), + (active_unconcluded_onchain.0, active_unconcluded_onchain.1), DisputeState { validators_for: bitvec![u8, Lsb0; 1, 1, 1, 0, 0, 0, 0, 0, 0], validators_against: bitvec![u8, Lsb0; 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -170,9 +170,9 @@ fn partitioning_happy_case() { ); let active_concluded_onchain = (4, CandidateHash(Hash::random()), DisputeStatus::Active); - input.push(active_concluded_onchain.clone()); + input.push(active_concluded_onchain); onchain.insert( - (active_concluded_onchain.0, active_concluded_onchain.1.clone()), + (active_concluded_onchain.0, active_concluded_onchain.1), DisputeState { validators_for: bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1, 0], validators_against: bitvec![u8, Lsb0; 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -186,9 +186,9 @@ fn 
partitioning_happy_case() { CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(time_now - ACTIVE_DURATION_SECS * 2), ); - input.push(inactive_concluded_onchain.clone()); + input.push(inactive_concluded_onchain); onchain.insert( - (inactive_concluded_onchain.0, inactive_concluded_onchain.1.clone()), + (inactive_concluded_onchain.0, inactive_concluded_onchain.1), DisputeState { validators_for: bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 0, 0], validators_against: bitvec![u8, Lsb0; 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -254,10 +254,10 @@ fn partitioning_doubled_onchain_vote() { // Dispute B has supermajority + 1 votes, so the doubled onchain vote doesn't affect it. It // should be considered as 'can conclude onchain'. let dispute_b = (4, CandidateHash(Hash::random()), DisputeStatus::Active); - input.push(dispute_a.clone()); - input.push(dispute_b.clone()); + input.push(dispute_a); + input.push(dispute_b); onchain.insert( - (dispute_a.0, dispute_a.1.clone()), + (dispute_a.0, dispute_a.1), DisputeState { validators_for: bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 0, 0], validators_against: bitvec![u8, Lsb0; 1, 0, 0, 0, 0, 0, 0, 0, 0], @@ -266,7 +266,7 @@ fn partitioning_doubled_onchain_vote() { }, ); onchain.insert( - (dispute_b.0, dispute_b.1.clone()), + (dispute_b.0, dispute_b.1), DisputeState { validators_for: bitvec![u8, Lsb0; 1, 1, 1, 1, 1, 1, 1, 1, 0], validators_against: bitvec![u8, Lsb0; 1, 0, 0, 0, 0, 0, 0, 0, 0], @@ -287,10 +287,10 @@ fn partitioning_duplicated_dispute() { let mut onchain = HashMap::<(u32, CandidateHash), DisputeState>::new(); let some_dispute = (3, CandidateHash(Hash::random()), DisputeStatus::Active); - input.push(some_dispute.clone()); - input.push(some_dispute.clone()); + input.push(some_dispute); + input.push(some_dispute); onchain.insert( - (some_dispute.0, some_dispute.1.clone()), + (some_dispute.0, some_dispute.1), DisputeState { validators_for: bitvec![u8, Lsb0; 1, 1, 1, 0, 0, 0, 0, 0, 0], validators_against: bitvec![u8, Lsb0; 0, 0, 0, 0, 0, 0, 0, 0, 0], @@ -385,7 +385,7 @@ impl TestDisputes { local_votes_count: usize, dummy_receipt: CandidateReceipt, ) { - self.local_disputes.push(dispute.clone()); + self.local_disputes.push(dispute); self.votes_db.insert( (dispute.0, dispute.1), CandidateVotes { @@ -412,7 +412,7 @@ impl TestDisputes { DisputeStatus::ConcludedAgainst(_) | DisputeStatus::ConcludedFor(_) => Some(1), }; self.onchain_disputes.insert( - (dispute.0, dispute.1.clone()), + (dispute.0, dispute.1), DisputeState { validators_for: TestDisputes::generate_bitvec( self.validators_count, @@ -434,10 +434,10 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 80 / 100; let session_idx = 0; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); - self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); self.add_onchain_dispute(d, onchain_votes_count); } @@ -452,10 +452,10 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 40 / 100; let session_idx = 1; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); - 
self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); self.add_onchain_dispute(d, onchain_votes_count); } @@ -469,10 +469,10 @@ impl TestDisputes { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 2; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Confirmed); - self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); } (session_idx, local_votes_count * dispute_count) } @@ -485,10 +485,10 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 75 / 100; let session_idx = 3; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(0)); - self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); self.add_onchain_dispute(d, onchain_votes_count); } (session_idx, (local_votes_count - onchain_votes_count) * dispute_count) @@ -501,10 +501,10 @@ impl TestDisputes { let local_votes_count = self.validators_count * 90 / 100; let session_idx = 4; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::ConcludedFor(0)); - self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); } (session_idx, local_votes_count * dispute_count) } @@ -517,10 +517,10 @@ impl TestDisputes { let onchain_votes_count = self.validators_count * 10 / 100; let session_idx = 5; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); - self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); self.add_onchain_dispute(d, onchain_votes_count); } @@ -534,10 +534,10 @@ impl TestDisputes { let local_votes_count = self.validators_count * 10 / 100; let session_idx = 6; let lf = leaf(); - let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash.clone()); + let dummy_receipt = test_helpers::dummy_candidate_receipt(lf.hash); for _ in 0..dispute_count { let d = (session_idx, CandidateHash(Hash::random()), DisputeStatus::Active); - self.add_offchain_dispute(d.clone(), local_votes_count, dummy_receipt.clone()); + self.add_offchain_dispute(d, local_votes_count, dummy_receipt.clone()); } (session_idx, local_votes_count * dispute_count) diff --git a/node/core/pvf-checker/src/tests.rs b/node/core/pvf-checker/src/tests.rs index b223b1b54c0b..d1daa7a58135 100644 --- a/node/core/pvf-checker/src/tests.rs +++ b/node/core/pvf-checker/src/tests.rs @@ -188,7 +188,7 @@ impl TestState { let 
activated = if let Some(activated_leaf) = fake_leaf { self.leaves.insert( - activated_leaf.block_hash.clone(), + activated_leaf.block_hash, LeafState { session_index: self.last_session_index, pvfs: activated_leaf.pvfs.clone(), @@ -497,9 +497,9 @@ fn reactivating_pvf_leads_to_second_check() { test_harness(|mut test_state, mut handle| { async move { let pvf = dummy_validation_code_hash(1); - let block_1 = FakeLeaf::new(dummy_hash(), 1, vec![pvf.clone()]); + let block_1 = FakeLeaf::new(dummy_hash(), 1, vec![pvf]); let block_2 = block_1.descendant(vec![]); - let block_3 = block_2.descendant(vec![pvf.clone()]); + let block_3 = block_2.descendant(vec![pvf]); test_state .activate_leaf_with_session( @@ -552,9 +552,9 @@ fn dont_double_vote_for_pvfs_in_view() { test_harness(|mut test_state, mut handle| { async move { let pvf = dummy_validation_code_hash(1); - let block_1_1 = FakeLeaf::new([1; 32].into(), 1, vec![pvf.clone()]); - let block_2_1 = FakeLeaf::new([2; 32].into(), 1, vec![pvf.clone()]); - let block_1_2 = block_1_1.descendant(vec![pvf.clone()]); + let block_1_1 = FakeLeaf::new([1; 32].into(), 1, vec![pvf]); + let block_2_1 = FakeLeaf::new([2; 32].into(), 1, vec![pvf]); + let block_1_2 = block_1_1.descendant(vec![pvf]); test_state .activate_leaf_with_session( @@ -605,8 +605,8 @@ fn judgements_come_out_of_order() { let pvf_1 = dummy_validation_code_hash(1); let pvf_2 = dummy_validation_code_hash(2); - let block_1 = FakeLeaf::new([1; 32].into(), 1, vec![pvf_1.clone()]); - let block_2 = FakeLeaf::new([2; 32].into(), 1, vec![pvf_2.clone()]); + let block_1 = FakeLeaf::new([1; 32].into(), 1, vec![pvf_1]); + let block_2 = FakeLeaf::new([2; 32].into(), 1, vec![pvf_2]); test_state .activate_leaf_with_session( diff --git a/node/core/pvf/tests/it/adder.rs b/node/core/pvf/tests/it/adder.rs index f52827699e2d..a4c2e21bdeaa 100644 --- a/node/core/pvf/tests/it/adder.rs +++ b/node/core/pvf/tests/it/adder.rs @@ -53,14 +53,14 @@ async fn execute_good_block_on_parent() { #[tokio::test] async fn execute_good_chain_on_parent() { - let mut number = 0; let mut parent_hash = [0; 32]; let mut last_state = 0; let host = TestHost::new(); - for add in 0..10 { - let parent_head = HeadData { number, parent_hash, post_state: hash_state(last_state) }; + for (number, add) in (0..10).enumerate() { + let parent_head = + HeadData { number: number as u64, parent_hash, post_state: hash_state(last_state) }; let block_data = BlockData { state: last_state, add }; @@ -80,11 +80,10 @@ async fn execute_good_chain_on_parent() { let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); - assert_eq!(new_head.number, number + 1); + assert_eq!(new_head.number, number as u64 + 1); assert_eq!(new_head.parent_hash, parent_head.hash()); assert_eq!(new_head.post_state, hash_state(last_state + add)); - number += 1; parent_hash = new_head.hash(); last_state += add; } diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 33f5eef3869f..53b3fd56bf3e 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -984,7 +984,7 @@ fn requests_submit_pvf_check_statement() { ), }) .await; - assert_eq!(rx.await.unwrap().unwrap(), ()); + let _ = rx.await.unwrap().unwrap(); let (tx, rx) = oneshot::channel(); ctx_handle .send(FromOrchestra::Communication { @@ -994,7 +994,7 @@ fn requests_submit_pvf_check_statement() { ), }) .await; - assert_eq!(rx.await.unwrap().unwrap(), ()); + let _ = rx.await.unwrap().unwrap(); assert_eq!( 
&*subsystem_client.submitted_pvf_check_statement.lock().expect("poisened mutex"), @@ -1061,9 +1061,7 @@ fn requests_validation_code_hash() { let validation_code_hash = dummy_validation_code().hash(); let mut subsystem_client = MockSubsystemClient::default(); - subsystem_client - .validation_code_hash - .insert(para_a, validation_code_hash.clone()); + subsystem_client.validation_code_hash.insert(para_a, validation_code_hash); let subsystem_client = Arc::new(subsystem_client); let subsystem = diff --git a/node/malus/src/variants/back_garbage_candidate.rs b/node/malus/src/variants/back_garbage_candidate.rs index 45f1aa2e0b7f..aa904c37b80a 100644 --- a/node/malus/src/variants/back_garbage_candidate.rs +++ b/node/malus/src/variants/back_garbage_candidate.rs @@ -59,10 +59,10 @@ pub(crate) struct BackGarbageCandidates { } impl OverseerGen for BackGarbageCandidates { - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ) -> Result< (Overseer, Arc>>, OverseerHandle), Error, diff --git a/node/malus/src/variants/common.rs b/node/malus/src/variants/common.rs index ab1dfbbb360a..6bc889595362 100644 --- a/node/malus/src/variants/common.rs +++ b/node/malus/src/variants/common.rs @@ -392,9 +392,8 @@ where let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); match behave_maliciously { true => { - let validation_result = ValidationResult::Invalid( - self.fake_validation_error.clone().into(), - ); + let validation_result = + ValidationResult::Invalid(self.fake_validation_error.into()); gum::info!( target: MALUS, para_id = ?candidate_receipt.descriptor.para_id, diff --git a/node/malus/src/variants/dispute_valid_candidates.rs b/node/malus/src/variants/dispute_valid_candidates.rs index 9ea8449a1d0b..fa3b0c38bc2f 100644 --- a/node/malus/src/variants/dispute_valid_candidates.rs +++ b/node/malus/src/variants/dispute_valid_candidates.rs @@ -76,10 +76,10 @@ pub(crate) struct DisputeValidCandidates { } impl OverseerGen for DisputeValidCandidates { - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ) -> Result< (Overseer, Arc>>, OverseerHandle), Error, diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs index 7d301c194b44..b0290fff949d 100644 --- a/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/node/malus/src/variants/suggest_garbage_candidate.rs @@ -99,7 +99,7 @@ where // equal to `p`. We use `rand::thread_rng` as the source of randomness. 
let generate_malicious_candidate = distribution.sample(&mut rand::thread_rng()); - if generate_malicious_candidate == true { + if generate_malicious_candidate { gum::debug!(target: MALUS, "😈 Suggesting malicious candidate.",); let pov = PoV { block_data: BlockData(MALICIOUS_POV.into()) }; @@ -253,10 +253,10 @@ pub(crate) struct SuggestGarbageCandidates { } impl OverseerGen for SuggestGarbageCandidates { - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ) -> Result< (Overseer, Arc>>, OverseerHandle), Error, diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 422157a1eda9..bfd7c945069c 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -148,7 +148,7 @@ fn make_gossip_topology( assert!(all_peers.len() >= grid_size); let peer_info = |i: usize| TopologyPeerInfo { - peer_ids: vec![all_peers[i].0.clone()], + peer_ids: vec![all_peers[i].0], validator_index: ValidatorIndex::from(i as u32), discovery_id: all_peers[i].1.clone(), }; @@ -224,7 +224,7 @@ async fn setup_peer_with_view( overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer_id.clone(), + *peer_id, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -234,8 +234,7 @@ async fn setup_peer_with_view( overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer_id.clone(), - view, + *peer_id, view, )), ) .await; @@ -249,7 +248,7 @@ async fn send_message_from_peer( overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer_id.clone(), + *peer_id, Versioned::V1(msg), )), ) @@ -480,7 +479,7 @@ fn spam_attack_results_in_negative_reputation_change() { // new block `hash_b` with 20 candidates let candidates_count = 20; let meta = BlockApprovalMeta { - hash: hash_b.clone(), + hash: hash_b, parent_hash, number: 2, candidates: vec![Default::default(); candidates_count], @@ -527,7 +526,7 @@ fn spam_attack_results_in_negative_reputation_change() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, View::with_finalized(2), )), ) @@ -587,7 +586,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, view![hash], )), ) @@ -956,7 +955,7 @@ fn update_peer_view() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, View::new(vec![hash_b, hash_c, hash_d], 2), )), ) @@ -1009,7 +1008,7 @@ fn update_peer_view() { overseer_send( overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, View::with_finalized(finalized_number), )), ) @@ -1166,7 +1165,7 @@ fn sends_assignments_even_when_state_is_approved() { protocol_v1::ApprovalDistributionMessage::Assignments(sent_assignments) )) )) => { - assert_eq!(peers, vec![peer.clone()]); + assert_eq!(peers, vec![*peer]); assert_eq!(sent_assignments, assignments); } ); @@ -1179,7 +1178,7 @@ fn sends_assignments_even_when_state_is_approved() { 
protocol_v1::ApprovalDistributionMessage::Approvals(sent_approvals) )) )) => { - assert_eq!(peers, vec![peer.clone()]); + assert_eq!(peers, vec![*peer]); assert_eq!(sent_approvals, approvals); } ); @@ -1208,7 +1207,7 @@ fn race_condition_in_local_vs_remote_view_update() { // Test a small number of candidates let candidates_count = 1; let meta = BlockApprovalMeta { - hash: hash_b.clone(), + hash: hash_b, parent_hash, number: 2, candidates: vec![Default::default(); candidates_count], @@ -1811,7 +1810,7 @@ fn originator_aggression_l1() { let mut state = State::default(); state.aggression_config.resend_unfinalized_period = None; - let aggression_l1_threshold = state.aggression_config.l1_threshold.clone().unwrap(); + let aggression_l1_threshold = state.aggression_config.l1_threshold.unwrap(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -1931,8 +1930,7 @@ fn originator_aggression_l1() { assert_eq!(sent_assignments, assignments); assert!(unsent_indices.iter() - .find(|i| &peers[**i].0 == &sent_peers[0]) - .is_some()); + .any(|i| &peers[*i].0 == &sent_peers[0])); } ); } @@ -1951,8 +1949,7 @@ fn originator_aggression_l1() { assert_eq!(sent_approvals, approvals); assert!(unsent_indices.iter() - .find(|i| &peers[**i].0 == &sent_peers[0]) - .is_some()); + .any(|i| &peers[*i].0 == &sent_peers[0])); } ); } @@ -1972,7 +1969,7 @@ fn non_originator_aggression_l1() { let mut state = state_without_reputation_delay(); state.aggression_config.resend_unfinalized_period = None; - let aggression_l1_threshold = state.aggression_config.l1_threshold.clone().unwrap(); + let aggression_l1_threshold = state.aggression_config.l1_threshold.unwrap(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2077,8 +2074,8 @@ fn non_originator_aggression_l2() { let mut state = state_without_reputation_delay(); state.aggression_config.resend_unfinalized_period = None; - let aggression_l1_threshold = state.aggression_config.l1_threshold.clone().unwrap(); - let aggression_l2_threshold = state.aggression_config.l2_threshold.clone().unwrap(); + let aggression_l1_threshold = state.aggression_config.l1_threshold.unwrap(); + let aggression_l2_threshold = state.aggression_config.l2_threshold.unwrap(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2222,8 +2219,7 @@ fn non_originator_aggression_l2() { assert_eq!(sent_assignments, assignments); assert!(unsent_indices.iter() - .find(|i| &peers[**i].0 == &sent_peers[0]) - .is_some()); + .any(|i| &peers[*i].0 == &sent_peers[0])); } ); } diff --git a/node/network/availability-distribution/src/tests/state.rs b/node/network/availability-distribution/src/tests/state.rs index 36fb16f7e11c..706ec13a3e9b 100644 --- a/node/network/availability-distribution/src/tests/state.rs +++ b/node/network/availability-distribution/src/tests/state.rs @@ -123,13 +123,13 @@ impl Default for TestState { let (core, chunk) = OccupiedCoreBuilder { group_responsible: GroupIndex(i as _), para_id: *para_id, - relay_parent: relay_parent.clone(), + relay_parent: *relay_parent, } .build(); (CoreState::Occupied(core), chunk) }) .unzip(); - cores.insert(relay_child.clone(), p_cores); + cores.insert(*relay_child, p_cores); // Skip chunks for our own group (won't get fetched): let mut chunks_other_groups = p_chunks.into_iter(); chunks_other_groups.next(); @@ -176,12 +176,12 @@ impl TestState { .zip(advanced) .map(|(old, new)| ActiveLeavesUpdate { activated: 
Some(ActivatedLeaf { - hash: new.clone(), + hash: *new, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }), - deactivated: vec![old.clone()].into(), + deactivated: vec![*old].into(), }) .collect::>() }; @@ -239,8 +239,7 @@ impl TestState { let chunk = self .chunks .get_mut(&(candidate_hash, validator_index)) - .map(Vec::pop) - .flatten() + .and_then(Vec::pop) .flatten(); tx.send(chunk).expect("Receiver is expected to be alive"); }, diff --git a/node/network/availability-recovery/src/tests.rs b/node/network/availability-recovery/src/tests.rs index c5647a12f589..de923f5967e5 100644 --- a/node/network/availability-recovery/src/tests.rs +++ b/node/network/availability-recovery/src/tests.rs @@ -562,7 +562,7 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -648,7 +648,7 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -734,7 +734,7 @@ fn bad_merkle_path_leads_to_recovery_error() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -792,7 +792,7 @@ fn wrong_chunk_index_leads_to_recovery_error() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -866,7 +866,7 @@ fn invalid_erasure_coding_leads_to_invalid_error() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -915,7 +915,7 @@ fn fast_path_backing_group_recovers() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -965,7 +965,7 @@ fn recovers_from_only_chunks_if_pov_large() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1069,7 +1069,7 @@ fn fast_path_backing_group_recovers_if_pov_small() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1128,7 +1128,7 @@ fn no_answers_in_fast_path_causes_chunk_requests() { overseer_signal( &mut virtual_overseer, 
OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1190,7 +1190,7 @@ fn task_canceled_when_receivers_dropped() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1232,7 +1232,7 @@ fn chunks_retry_until_all_nodes_respond() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1293,7 +1293,7 @@ fn not_returning_requests_wont_stall_retrieval() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1365,7 +1365,7 @@ fn all_not_returning_requests_still_recovers_on_return() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1442,7 +1442,7 @@ fn returns_early_if_we_have_the_data() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1479,7 +1479,7 @@ fn does_not_query_local_validator() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), @@ -1538,7 +1538,7 @@ fn invalid_local_chunk_is_ignored() { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf { - hash: test_state.current.clone(), + hash: test_state.current, number: 1, status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index 39816a55240b..9aea5a7178b7 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -65,7 +65,7 @@ fn prewarmed_state( known_message: BitfieldGossipMessage, peers: Vec, ) -> ProtocolState { - let relay_parent = known_message.relay_parent.clone(); + let relay_parent = known_message.relay_parent; let mut topologies = SessionBoundGridTopologyStorage::default(); topologies.update_topology(0_u32, SessionGridTopology::new(Vec::new(), Vec::new()), None); topologies.get_current_topology_mut().local_grid_neighbors_mut().peers_x = @@ -73,7 +73,7 @@ fn prewarmed_state( ProtocolState { per_relay_parent: hashmap! 
{ - relay_parent.clone() => + relay_parent => PerRelayParentData { signing_context, validator_set: vec![validator.clone()], @@ -99,7 +99,7 @@ fn state_with_view( ) -> (ProtocolState, SigningContext, KeystorePtr, ValidatorId) { let mut state = ProtocolState { reputation, ..Default::default() }; - let signing_context = SigningContext { session_index: 1, parent_hash: relay_parent.clone() }; + let signing_context = SigningContext { session_index: 1, parent_hash: relay_parent }; let keystore: KeystorePtr = Arc::new(MemoryKeystore::new()); let validator = Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, None) @@ -109,10 +109,10 @@ fn state_with_view( .iter() .map(|relay_parent| { ( - relay_parent.clone(), + *relay_parent, PerRelayParentData { signing_context: signing_context.clone(), - validator_set: vec![validator.clone().into()], + validator_set: vec![validator.into()], one_per_validator: hashmap! {}, message_received_from_peer: hashmap! {}, message_sent_to_peer: hashmap! {}, @@ -140,7 +140,7 @@ fn receive_invalid_signature() { let peer_b = PeerId::random(); assert_ne!(peer_a, peer_b); - let signing_context = SigningContext { session_index: 1, parent_hash: hash_a.clone() }; + let signing_context = SigningContext { session_index: 1, parent_hash: hash_a }; // another validator not part of the validatorset let keystore: KeystorePtr = Arc::new(MemoryKeystore::new()); @@ -184,28 +184,20 @@ fn receive_invalid_signature() { .flatten() .expect("should be signed"); - let invalid_msg = BitfieldGossipMessage { - relay_parent: hash_a.clone(), - signed_availability: invalid_signed.clone(), - }; + let invalid_msg = + BitfieldGossipMessage { relay_parent: hash_a, signed_availability: invalid_signed.clone() }; let invalid_msg_2 = BitfieldGossipMessage { - relay_parent: hash_a.clone(), + relay_parent: hash_a, signed_availability: invalid_signed_2.clone(), }; - let valid_msg = BitfieldGossipMessage { - relay_parent: hash_a.clone(), - signed_availability: valid_signed.clone(), - }; + let valid_msg = + BitfieldGossipMessage { relay_parent: hash_a, signed_availability: valid_signed.clone() }; let pool = sp_core::testing::TaskExecutor::new(); let (mut ctx, mut handle) = make_subsystem_context::(pool); - let mut state = prewarmed_state( - validator_0.into(), - signing_context.clone(), - valid_msg, - vec![peer_b.clone()], - ); + let mut state = + prewarmed_state(validator_0.into(), signing_context.clone(), valid_msg, vec![peer_b]); state .per_relay_parent .get_mut(&hash_a) @@ -219,7 +211,7 @@ fn receive_invalid_signature() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg.into_network_message()), + NetworkBridgeEvent::PeerMessage(peer_b, invalid_msg.into_network_message()), &mut rng, )); @@ -230,7 +222,7 @@ fn receive_invalid_signature() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg_2.into_network_message()), + NetworkBridgeEvent::PeerMessage(peer_b, invalid_msg_2.into_network_message()), &mut rng, )); // reputation change due to invalid signature @@ -261,13 +253,10 @@ fn receive_invalid_validator_index() { assert_ne!(peer_a, peer_b); // validator 0 key pair - let (mut state, signing_context, keystore, validator) = state_with_view( - our_view![hash_a, hash_b], - hash_a.clone(), - ReputationAggregator::new(|_| true), - ); + let (mut state, signing_context, keystore, validator) = + state_with_view(our_view![hash_a, hash_b], hash_a, ReputationAggregator::new(|_| true)); - 
state.peer_views.insert(peer_b.clone(), view![hash_a]); + state.peer_views.insert(peer_b, view![hash_a]); let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); let signed = Signed::::sign( @@ -281,8 +270,7 @@ fn receive_invalid_validator_index() { .flatten() .expect("should be signed"); - let msg = - BitfieldGossipMessage { relay_parent: hash_a.clone(), signed_availability: signed.clone() }; + let msg = BitfieldGossipMessage { relay_parent: hash_a, signed_availability: signed.clone() }; let pool = sp_core::testing::TaskExecutor::new(); let (mut ctx, mut handle) = make_subsystem_context::(pool); @@ -293,7 +281,7 @@ fn receive_invalid_validator_index() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.into_network_message()), + NetworkBridgeEvent::PeerMessage(peer_b, msg.into_network_message()), &mut rng, )); @@ -325,11 +313,8 @@ fn receive_duplicate_messages() { assert_ne!(peer_a, peer_b); // validator 0 key pair - let (mut state, signing_context, keystore, validator) = state_with_view( - our_view![hash_a, hash_b], - hash_a.clone(), - ReputationAggregator::new(|_| true), - ); + let (mut state, signing_context, keystore, validator) = + state_with_view(our_view![hash_a, hash_b], hash_a, ReputationAggregator::new(|_| true)); // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); @@ -345,7 +330,7 @@ fn receive_duplicate_messages() { .expect("should be signed"); let msg = BitfieldGossipMessage { - relay_parent: hash_a.clone(), + relay_parent: hash_a, signed_availability: signed_bitfield.clone(), }; @@ -359,7 +344,7 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peer_b, msg.clone().into_network_message(),), &mut rng, )); @@ -392,7 +377,7 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_a.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peer_a, msg.clone().into_network_message(),), &mut rng, )); @@ -411,7 +396,7 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peer_b, msg.clone().into_network_message(),), &mut rng, )); @@ -442,11 +427,8 @@ fn delay_reputation_change() { let peer = PeerId::random(); // validator 0 key pair - let (mut state, signing_context, keystore, validator) = state_with_view( - our_view![hash_a, hash_b], - hash_a.clone(), - ReputationAggregator::new(|_| false), - ); + let (mut state, signing_context, keystore, validator) = + state_with_view(our_view![hash_a, hash_b], hash_a, ReputationAggregator::new(|_| false)); // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); @@ -462,7 +444,7 @@ fn delay_reputation_change() { .expect("should be signed"); let msg = BitfieldGossipMessage { - relay_parent: hash_a.clone(), + relay_parent: hash_a, signed_availability: signed_bitfield.clone(), }; @@ -481,10 +463,7 @@ fn delay_reputation_change() { handle .send(FromOrchestra::Communication { msg: BitfieldDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage( - peer.clone(), - msg.clone().into_network_message(), - ), + NetworkBridgeEvent::PeerMessage(peer, 
msg.clone().into_network_message()), ), }) .await; @@ -507,10 +486,7 @@ fn delay_reputation_change() { handle .send(FromOrchestra::Communication { msg: BitfieldDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage( - peer.clone(), - msg.clone().into_network_message(), - ), + NetworkBridgeEvent::PeerMessage(peer, msg.clone().into_network_message()), ), }) .await; @@ -555,7 +531,7 @@ fn do_not_relay_message_twice() { // validator 0 key pair let (mut state, signing_context, keystore, validator) = - state_with_view(our_view![hash], hash.clone(), ReputationAggregator::new(|_| true)); + state_with_view(our_view![hash], hash, ReputationAggregator::new(|_| true)); // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); @@ -570,13 +546,11 @@ fn do_not_relay_message_twice() { .flatten() .expect("should be signed"); - state.peer_views.insert(peer_b.clone(), view![hash]); - state.peer_views.insert(peer_a.clone(), view![hash]); + state.peer_views.insert(peer_b, view![hash]); + state.peer_views.insert(peer_a, view![hash]); - let msg = BitfieldGossipMessage { - relay_parent: hash.clone(), - signed_availability: signed_bitfield.clone(), - }; + let msg = + BitfieldGossipMessage { relay_parent: hash, signed_availability: signed_bitfield.clone() }; let pool = sp_core::testing::TaskExecutor::new(); let (mut ctx, mut handle) = make_subsystem_context::(pool); @@ -584,7 +558,7 @@ fn do_not_relay_message_twice() { executor::block_on(async move { let mut gossip_peers = GridNeighbors::empty(); - gossip_peers.peers_x = HashSet::from_iter(vec![peer_a.clone(), peer_b.clone()].into_iter()); + gossip_peers.peers_x = HashSet::from_iter(vec![peer_a, peer_b].into_iter()); relay_message( &mut ctx, @@ -665,11 +639,8 @@ fn changing_view() { assert_ne!(peer_a, peer_b); // validator 0 key pair - let (mut state, signing_context, keystore, validator) = state_with_view( - our_view![hash_a, hash_b], - hash_a.clone(), - ReputationAggregator::new(|_| true), - ); + let (mut state, signing_context, keystore, validator) = + state_with_view(our_view![hash_a, hash_b], hash_a, ReputationAggregator::new(|_| true)); // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); @@ -685,7 +656,7 @@ fn changing_view() { .expect("should be signed"); let msg = BitfieldGossipMessage { - relay_parent: hash_a.clone(), + relay_parent: hash_a, signed_availability: signed_bitfield.clone(), }; @@ -699,7 +670,7 @@ fn changing_view() { &mut state, &Default::default(), NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, ValidationVersion::V1.into(), None @@ -712,7 +683,7 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a, hash_b]), + NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a, hash_b]), &mut rng, )); @@ -723,7 +694,7 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peer_b, msg.clone().into_network_message(),), &mut rng, )); @@ -754,7 +725,7 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![]), + NetworkBridgeEvent::PeerViewChange(peer_b, view![]), &mut rng, )); @@ -767,7 +738,7 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - 
NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peer_b, msg.clone().into_network_message(),), &mut rng, )); @@ -786,7 +757,7 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerDisconnected(peer_b.clone()), + NetworkBridgeEvent::PeerDisconnected(peer_b), &mut rng, )); @@ -799,7 +770,7 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_a.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peer_a, msg.clone().into_network_message(),), &mut rng, )); @@ -846,13 +817,11 @@ fn do_not_send_message_back_to_origin() { .flatten() .expect("should be signed"); - state.peer_views.insert(peer_b.clone(), view![hash]); - state.peer_views.insert(peer_a.clone(), view![hash]); + state.peer_views.insert(peer_b, view![hash]); + state.peer_views.insert(peer_a, view![hash]); - let msg = BitfieldGossipMessage { - relay_parent: hash.clone(), - signed_availability: signed_bitfield.clone(), - }; + let msg = + BitfieldGossipMessage { relay_parent: hash, signed_availability: signed_bitfield.clone() }; let pool = sp_core::testing::TaskExecutor::new(); let (mut ctx, mut handle) = make_subsystem_context::(pool); @@ -864,7 +833,7 @@ fn do_not_send_message_back_to_origin() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peer_b, msg.clone().into_network_message(),), &mut rng, )); @@ -930,13 +899,13 @@ fn topology_test() { let peers_x: Vec<_> = [0, 2, 3, 4, 5, 6] .iter() .cloned() - .map(|i| topology_peer_info[i].peer_ids[0].clone()) + .map(|i| topology_peer_info[i].peer_ids[0]) .collect(); let peers_y: Vec<_> = [8, 15, 22, 29, 36, 43] .iter() .cloned() - .map(|i| topology_peer_info[i].peer_ids[0].clone()) + .map(|i| topology_peer_info[i].peer_ids[0]) .collect(); { @@ -963,13 +932,11 @@ fn topology_test() { .expect("should be signed"); peers_x.iter().chain(peers_y.iter()).for_each(|peer| { - state.peer_views.insert(peer.clone(), view![hash]); + state.peer_views.insert(*peer, view![hash]); }); - let msg = BitfieldGossipMessage { - relay_parent: hash.clone(), - signed_availability: signed_bitfield.clone(), - }; + let msg = + BitfieldGossipMessage { relay_parent: hash, signed_availability: signed_bitfield.clone() }; let pool = sp_core::testing::TaskExecutor::new(); let (mut ctx, mut handle) = make_subsystem_context::(pool); @@ -981,7 +948,7 @@ fn topology_test() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peers_x[0].clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage(peers_x[0], msg.clone().into_network_message(),), &mut rng, )); @@ -1064,22 +1031,22 @@ fn need_message_works() { .insert(signed_by.clone()); }; - assert!(true == pretend_send(&mut state, peer_a, &validator_set[0])); - assert!(true == pretend_send(&mut state, peer_b, &validator_set[1])); + assert!(pretend_send(&mut state, peer_a, &validator_set[0])); + assert!(pretend_send(&mut state, peer_b, &validator_set[1])); // sending the same thing must not be allowed - assert!(false == pretend_send(&mut state, peer_a, &validator_set[0])); + assert!(!pretend_send(&mut state, peer_a, &validator_set[0])); // receive by Alice pretend_receive(&mut state, peer_a, &validator_set[0]); // must be marked as not needed by Alice, so attempt to send to Alice must be false - assert!(false == pretend_send(&mut 
state, peer_a, &validator_set[0])); + assert!(!pretend_send(&mut state, peer_a, &validator_set[0])); // but ok for Bob - assert!(false == pretend_send(&mut state, peer_b, &validator_set[1])); + assert!(!pretend_send(&mut state, peer_b, &validator_set[1])); // receive by Bob pretend_receive(&mut state, peer_a, &validator_set[0]); // not ok for Alice - assert!(false == pretend_send(&mut state, peer_a, &validator_set[0])); + assert!(!pretend_send(&mut state, peer_a, &validator_set[0])); // also not ok for Bob - assert!(false == pretend_send(&mut state, peer_b, &validator_set[1])); + assert!(!pretend_send(&mut state, peer_b, &validator_set[1])); } diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index e18a7e541832..b04884edefaa 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -432,12 +432,8 @@ fn send_our_view_upon_connection() { handle.await_mode_switch().await; - network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) - .await; - network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) - .await; + network_handle.connect_peer(peer, PeerSet::Validation, ObservedRole::Full).await; + network_handle.connect_peer(peer, PeerSet::Collation, ObservedRole::Full).await; await_peer_connections(&shared, 1, 1).await; @@ -446,7 +442,7 @@ fn send_our_view_upon_connection() { assert_network_actions_contains( &actions, &NetworkAction::WriteNotification( - peer.clone(), + peer, PeerSet::Validation, WireMessage::::ViewUpdate(view.clone()).encode(), ), @@ -454,7 +450,7 @@ fn send_our_view_upon_connection() { assert_network_actions_contains( &actions, &NetworkAction::WriteNotification( - peer.clone(), + peer, PeerSet::Collation, WireMessage::::ViewUpdate(view.clone()).encode(), ), @@ -482,10 +478,10 @@ fn sends_view_updates_to_peers() { handle.await_mode_switch().await; network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer_a, PeerSet::Validation, ObservedRole::Full) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer(peer_b, PeerSet::Collation, ObservedRole::Full) .await; await_peer_connections(&shared, 1, 1).await; @@ -545,10 +541,10 @@ fn do_not_send_view_update_until_synced() { assert_ne!(peer_a, peer_b); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer_a, PeerSet::Validation, ObservedRole::Full) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer(peer_b, PeerSet::Collation, ObservedRole::Full) .await; await_peer_connections(&shared, 1, 1).await; @@ -640,10 +636,10 @@ fn do_not_send_view_update_when_only_finalized_block_changed() { let peer_b = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer_a, PeerSet::Validation, ObservedRole::Full) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer_b, PeerSet::Validation, ObservedRole::Full) .await; await_peer_connections(&shared, 2, 0).await; @@ -700,9 +696,7 @@ fn peer_view_updates_sent_via_overseer() { let peer = PeerId::random(); - network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) - .await; + network_handle.connect_peer(peer, PeerSet::Validation, ObservedRole::Full).await; await_peer_connections(&shared, 1, 0).await; @@ 
-712,7 +706,7 @@ fn peer_view_updates_sent_via_overseer() { { assert_sends_validation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -722,7 +716,7 @@ fn peer_view_updates_sent_via_overseer() { .await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), &mut virtual_overseer, ) .await; @@ -730,14 +724,14 @@ fn peer_view_updates_sent_via_overseer() { network_handle .peer_message( - peer.clone(), + peer, PeerSet::Validation, WireMessage::::ViewUpdate(view.clone()).encode(), ) .await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), view), + NetworkBridgeEvent::PeerViewChange(peer, view), &mut virtual_overseer, ) .await; @@ -752,9 +746,7 @@ fn peer_messages_sent_via_overseer() { let peer = PeerId::random(); - network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) - .await; + network_handle.connect_peer(peer, PeerSet::Validation, ObservedRole::Full).await; await_peer_connections(&shared, 1, 0).await; @@ -762,7 +754,7 @@ fn peer_messages_sent_via_overseer() { { assert_sends_validation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -772,7 +764,7 @@ fn peer_messages_sent_via_overseer() { .await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), &mut virtual_overseer, ) .await; @@ -787,13 +779,13 @@ fn peer_messages_sent_via_overseer() { network_handle .peer_message( - peer.clone(), + peer, PeerSet::Validation, WireMessage::ProtocolMessage(message_v1.clone()).encode(), ) .await; - network_handle.disconnect_peer(peer.clone(), PeerSet::Validation).await; + network_handle.disconnect_peer(peer, PeerSet::Validation).await; // Approval distribution message comes first, and the message is only sent to that // subsystem. 
then a disconnection event arises that is sent to all validation networking @@ -827,12 +819,8 @@ fn peer_disconnect_from_just_one_peerset() { let peer = PeerId::random(); - network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) - .await; - network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) - .await; + network_handle.connect_peer(peer, PeerSet::Validation, ObservedRole::Full).await; + network_handle.connect_peer(peer, PeerSet::Collation, ObservedRole::Full).await; await_peer_connections(&shared, 1, 1).await; @@ -840,7 +828,7 @@ fn peer_disconnect_from_just_one_peerset() { { assert_sends_validation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -850,7 +838,7 @@ fn peer_disconnect_from_just_one_peerset() { .await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), &mut virtual_overseer, ) .await; @@ -859,7 +847,7 @@ fn peer_disconnect_from_just_one_peerset() { { assert_sends_collation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -869,16 +857,16 @@ fn peer_disconnect_from_just_one_peerset() { .await; assert_sends_collation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), &mut virtual_overseer, ) .await; } - network_handle.disconnect_peer(peer.clone(), PeerSet::Validation).await; + network_handle.disconnect_peer(peer, PeerSet::Validation).await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerDisconnected(peer.clone()), + NetworkBridgeEvent::PeerDisconnected(peer), &mut virtual_overseer, ) .await; @@ -904,11 +892,7 @@ fn peer_disconnect_from_just_one_peerset() { assert_network_actions_contains( &actions, - &NetworkAction::WriteNotification( - peer.clone(), - PeerSet::Collation, - wire_message.clone(), - ), + &NetworkAction::WriteNotification(peer, PeerSet::Collation, wire_message.clone()), ); virtual_overseer }); @@ -923,10 +907,10 @@ fn relays_collation_protocol_messages() { let peer_b = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer_a, PeerSet::Validation, ObservedRole::Full) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer(peer_b, PeerSet::Collation, ObservedRole::Full) .await; await_peer_connections(&shared, 1, 1).await; @@ -935,7 +919,7 @@ fn relays_collation_protocol_messages() { { assert_sends_validation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer_a.clone(), + peer_a, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -945,7 +929,7 @@ fn relays_collation_protocol_messages() { .await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer_a, View::default()), &mut virtual_overseer, ) .await; @@ -954,7 +938,7 @@ fn relays_collation_protocol_messages() { { assert_sends_collation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -964,7 +948,7 @@ fn relays_collation_protocol_messages() { .await; assert_sends_collation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer_b.clone(), 
View::default()), + NetworkBridgeEvent::PeerViewChange(peer_b, View::default()), &mut virtual_overseer, ) .await; @@ -983,7 +967,7 @@ fn relays_collation_protocol_messages() { network_handle .peer_message( - peer_a.clone(), + peer_a, PeerSet::Collation, WireMessage::ProtocolMessage(message_v1.clone()).encode(), ) @@ -992,14 +976,14 @@ fn relays_collation_protocol_messages() { let actions = network_handle.next_network_actions(3).await; assert_network_actions_contains( &actions, - &NetworkAction::ReputationChange(peer_a.clone(), UNCONNECTED_PEERSET_COST.into()), + &NetworkAction::ReputationChange(peer_a, UNCONNECTED_PEERSET_COST.into()), ); // peer B has the message relayed. network_handle .peer_message( - peer_b.clone(), + peer_b, PeerSet::Collation, WireMessage::ProtocolMessage(message_v1.clone()).encode(), ) @@ -1027,12 +1011,8 @@ fn different_views_on_different_peer_sets() { let peer = PeerId::random(); - network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) - .await; - network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) - .await; + network_handle.connect_peer(peer, PeerSet::Validation, ObservedRole::Full).await; + network_handle.connect_peer(peer, PeerSet::Collation, ObservedRole::Full).await; await_peer_connections(&shared, 1, 1).await; @@ -1040,7 +1020,7 @@ fn different_views_on_different_peer_sets() { { assert_sends_validation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -1050,7 +1030,7 @@ fn different_views_on_different_peer_sets() { .await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), &mut virtual_overseer, ) .await; @@ -1059,7 +1039,7 @@ fn different_views_on_different_peer_sets() { { assert_sends_collation_event_to_all( NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -1069,7 +1049,7 @@ fn different_views_on_different_peer_sets() { .await; assert_sends_collation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), &mut virtual_overseer, ) .await; @@ -1080,7 +1060,7 @@ fn different_views_on_different_peer_sets() { network_handle .peer_message( - peer.clone(), + peer, PeerSet::Validation, WireMessage::::ViewUpdate(view_a.clone()).encode(), ) @@ -1088,20 +1068,20 @@ fn different_views_on_different_peer_sets() { network_handle .peer_message( - peer.clone(), + peer, PeerSet::Collation, WireMessage::::ViewUpdate(view_b.clone()).encode(), ) .await; assert_sends_validation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), view_a.clone()), + NetworkBridgeEvent::PeerViewChange(peer, view_a.clone()), &mut virtual_overseer, ) .await; assert_sends_collation_event_to_all( - NetworkBridgeEvent::PeerViewChange(peer.clone(), view_b.clone()), + NetworkBridgeEvent::PeerViewChange(peer, view_b.clone()), &mut virtual_overseer, ) .await; @@ -1117,7 +1097,7 @@ fn sent_views_include_finalized_number_update() { let peer_a = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer_a, PeerSet::Validation, ObservedRole::Full) .await; await_peer_connections(&shared, 1, 0).await; @@ -1146,11 +1126,7 @@ fn sent_views_include_finalized_number_update() { assert_network_actions_contains( &actions, - 
&NetworkAction::WriteNotification( - peer_a.clone(), - PeerSet::Validation, - wire_message.clone(), - ), + &NetworkAction::WriteNotification(peer_a, PeerSet::Validation, wire_message.clone()), ); virtual_overseer }); @@ -1164,14 +1140,14 @@ fn view_finalized_number_can_not_go_down() { let peer_a = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer_a, PeerSet::Validation, ObservedRole::Full) .await; await_peer_connections(&shared, 1, 0).await; network_handle .peer_message( - peer_a.clone(), + peer_a, PeerSet::Validation, WireMessage::::ViewUpdate(View::new( vec![Hash::repeat_byte(0x01)], @@ -1183,7 +1159,7 @@ fn view_finalized_number_can_not_go_down() { network_handle .peer_message( - peer_a.clone(), + peer_a, PeerSet::Validation, WireMessage::::ViewUpdate(View::new(vec![], 0)) .encode(), @@ -1193,7 +1169,7 @@ fn view_finalized_number_can_not_go_down() { let actions = network_handle.next_network_actions(2).await; assert_network_actions_contains( &actions, - &NetworkAction::ReputationChange(peer_a.clone(), MALFORMED_VIEW_COST.into()), + &NetworkAction::ReputationChange(peer_a, MALFORMED_VIEW_COST.into()), ); virtual_overseer }); diff --git a/node/network/bridge/src/tx/tests.rs b/node/network/bridge/src/tx/tests.rs index 520218d3c481..e851cb0dee6f 100644 --- a/node/network/bridge/src/tx/tests.rs +++ b/node/network/bridge/src/tx/tests.rs @@ -242,7 +242,7 @@ fn send_messages_to_peers() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer(peer, PeerSet::Validation, ObservedRole::Full) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); @@ -251,7 +251,7 @@ fn send_messages_to_peers() { // so the single item sink has to be free explicitly network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer(peer, PeerSet::Collation, ObservedRole::Full) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); @@ -269,7 +269,7 @@ fn send_messages_to_peers() { virtual_overseer .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], + vec![peer], Versioned::V1(message_v1.clone()), ), }) @@ -284,7 +284,7 @@ fn send_messages_to_peers() { .await .expect("Timeout does not occur"), NetworkAction::WriteNotification( - peer.clone(), + peer, PeerSet::Validation, WireMessage::ProtocolMessage(message_v1).encode(), ) @@ -306,7 +306,7 @@ fn send_messages_to_peers() { virtual_overseer .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendCollationMessage( - vec![peer.clone()], + vec![peer], Versioned::V1(message_v1.clone()), ), }) @@ -319,7 +319,7 @@ fn send_messages_to_peers() { .await .expect("Timeout does not occur"), NetworkAction::WriteNotification( - peer.clone(), + peer, PeerSet::Collation, WireMessage::ProtocolMessage(message_v1).encode(), ) diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index e89b38544684..86e861fbc5b5 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -311,7 +311,7 @@ mod tests { let (ns, ads) = new_network(); let authority_ids: Vec<_> = - ads.by_peer_id.values().map(|v| v.iter()).flatten().cloned().collect(); + ads.by_peer_id.values().flat_map(|v| v.iter()).cloned().collect(); futures::executor::block_on(async move { let (failed, _) = oneshot::channel(); @@ -344,7 +344,7 @@ mod tests { let (ns, 
ads) = new_network(); let authority_ids: Vec<_> = - ads.by_peer_id.values().map(|v| v.iter()).flatten().cloned().collect(); + ads.by_peer_id.values().flat_map(|v| v.iter()).cloned().collect(); futures::executor::block_on(async move { let (failed, failed_rx) = oneshot::channel(); diff --git a/node/network/collator-protocol/src/collator_side/tests.rs b/node/network/collator-protocol/src/collator_side/tests.rs index e406e5d869cc..ad1096196574 100644 --- a/node/network/collator-protocol/src/collator_side/tests.rs +++ b/node/network/collator-protocol/src/collator_side/tests.rs @@ -147,7 +147,7 @@ impl TestState { fn current_group_validator_peer_ids(&self) -> Vec { self.current_group_validator_indices() .iter() - .map(|i| self.validator_peer_id[i.0 as usize].clone()) + .map(|i| self.validator_peer_id[i.0 as usize]) .collect() } @@ -412,7 +412,7 @@ async fn connect_peer( overseer_send( virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, polkadot_node_network_protocol::ObservedRole::Authority, CollationVersion::V1.into(), authority_id.map(|v| HashSet::from([v])), @@ -509,7 +509,7 @@ async fn send_peer_view_change( overseer_send( virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + *peer, View::new(hashes, 0), )), ) @@ -519,7 +519,7 @@ async fn send_peer_view_change( #[test] fn advertise_and_send_collation() { let mut test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -540,7 +540,7 @@ fn advertise_and_send_collation() { .into_iter() .zip(test_state.current_group_validator_peer_ids()) { - connect_peer(&mut virtual_overseer, peer.clone(), Some(val.clone())).await; + connect_peer(&mut virtual_overseer, peer, Some(val.clone())).await; } // We declare to the connected validators that we are a collator. @@ -550,7 +550,7 @@ fn advertise_and_send_collation() { expect_declare_msg(&mut virtual_overseer, &test_state, &peer_id).await; } - let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let peer = test_state.current_group_validator_peer_ids()[0]; // Send info about peer's view. send_peer_view_change(&mut virtual_overseer, &peer, vec![test_state.relay_parent]) @@ -627,7 +627,7 @@ fn advertise_and_send_collation() { let old_relay_parent = test_state.relay_parent; test_state.advance_to_new_round(&mut virtual_overseer, false).await; - let peer = test_state.validator_peer_id[2].clone(); + let peer = test_state.validator_peer_id[2]; // Re-request a collation. 
let (pending_response, rx) = oneshot::channel(); @@ -658,7 +658,7 @@ fn advertise_and_send_collation() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange( - peer.clone(), + peer, view![test_state.relay_parent], )), ) @@ -674,7 +674,7 @@ fn advertise_and_send_collation() { #[test] fn delay_reputation_change() { let test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -694,7 +694,7 @@ fn delay_reputation_change() { .into_iter() .zip(test_state.current_group_validator_peer_ids()) { - connect_peer(&mut virtual_overseer, peer.clone(), Some(val.clone())).await; + connect_peer(&mut virtual_overseer, peer, Some(val.clone())).await; } // We declare to the connected validators that we are a collator. @@ -704,7 +704,7 @@ fn delay_reputation_change() { expect_declare_msg(&mut virtual_overseer, &test_state, &peer_id).await; } - let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let peer = test_state.current_group_validator_peer_ids()[0]; // Send info about peer's view. send_peer_view_change(&mut virtual_overseer, &peer, vec![test_state.relay_parent]) @@ -773,6 +773,7 @@ fn delay_reputation_change() { } #[test] +#[allow(clippy::async_yields_async)] fn send_only_one_collation_per_relay_parent_at_a_time() { test_validator_send_sequence(|mut second_response_receiver, feedback_first_tx| async move { Delay::new(Duration::from_millis(100)).await; @@ -788,6 +789,7 @@ fn send_only_one_collation_per_relay_parent_at_a_time() { } #[test] +#[allow(clippy::async_yields_async)] fn send_next_collation_after_max_unshared_upload_time() { test_validator_send_sequence(|second_response_receiver, _| async move { Delay::new(MAX_UNSHARED_UPLOAD_TIME + Duration::from_millis(50)).await; @@ -798,7 +800,7 @@ fn send_next_collation_after_max_unshared_upload_time() { #[test] fn collators_declare_to_connected_peers() { let test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -806,14 +808,13 @@ fn collators_declare_to_connected_peers() { collator_pair, ReputationAggregator::new(|_| true), |mut test_harness| async move { - let peer = test_state.validator_peer_id[0].clone(); + let peer = test_state.validator_peer_id[0]; let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); setup_system(&mut test_harness.virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(&mut test_harness.virtual_overseer, peer.clone(), Some(validator_id)) - .await; + connect_peer(&mut test_harness.virtual_overseer, peer, Some(validator_id)).await; expect_declare_msg(&mut test_harness.virtual_overseer, &test_state, &peer).await; test_harness }, @@ -823,7 +824,7 @@ fn collators_declare_to_connected_peers() { #[test] fn collations_are_only_advertised_to_validators_with_correct_view() { let test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -833,19 +834,19 @@ fn collations_are_only_advertised_to_validators_with_correct_view() { |mut test_harness| async move { let virtual_overseer = &mut test_harness.virtual_overseer; - let peer = 
test_state.current_group_validator_peer_ids()[0].clone(); + let peer = test_state.current_group_validator_peer_ids()[0]; let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); - let peer2 = test_state.current_group_validator_peer_ids()[1].clone(); + let peer2 = test_state.current_group_validator_peer_ids()[1]; let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone(); setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer(virtual_overseer, peer, Some(validator_id)).await; // Connect the second validator - connect_peer(virtual_overseer, peer2.clone(), Some(validator_id2)).await; + connect_peer(virtual_overseer, peer2, Some(validator_id2)).await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; expect_declare_msg(virtual_overseer, &test_state, &peer2).await; @@ -870,7 +871,7 @@ fn collations_are_only_advertised_to_validators_with_correct_view() { #[test] fn collate_on_two_different_relay_chain_blocks() { let mut test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -880,19 +881,19 @@ fn collate_on_two_different_relay_chain_blocks() { |mut test_harness| async move { let virtual_overseer = &mut test_harness.virtual_overseer; - let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let peer = test_state.current_group_validator_peer_ids()[0]; let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); - let peer2 = test_state.current_group_validator_peer_ids()[1].clone(); + let peer2 = test_state.current_group_validator_peer_ids()[1]; let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone(); setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer(virtual_overseer, peer, Some(validator_id)).await; // Connect the second validator - connect_peer(virtual_overseer, peer2.clone(), Some(validator_id2)).await; + connect_peer(virtual_overseer, peer2, Some(validator_id2)).await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; expect_declare_msg(virtual_overseer, &test_state, &peer2).await; @@ -921,7 +922,7 @@ fn collate_on_two_different_relay_chain_blocks() { #[test] fn validator_reconnect_does_not_advertise_a_second_time() { let test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -931,13 +932,13 @@ fn validator_reconnect_does_not_advertise_a_second_time() { |mut test_harness| async move { let virtual_overseer = &mut test_harness.virtual_overseer; - let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let peer = test_state.current_group_validator_peer_ids()[0]; let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id.clone())).await; + connect_peer(virtual_overseer, peer, Some(validator_id.clone())).await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; distribute_collation(virtual_overseer, &test_state, true).await; @@ -946,8 
+947,8 @@ fn validator_reconnect_does_not_advertise_a_second_time() { expect_advertise_collation_msg(virtual_overseer, &peer, test_state.relay_parent).await; // Disconnect and reconnect directly - disconnect_peer(virtual_overseer, peer.clone()).await; - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + disconnect_peer(virtual_overseer, peer).await; + connect_peer(virtual_overseer, peer, Some(validator_id)).await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; send_peer_view_change(virtual_overseer, &peer, vec![test_state.relay_parent]).await; @@ -961,7 +962,7 @@ fn validator_reconnect_does_not_advertise_a_second_time() { #[test] fn collators_reject_declare_messages() { let test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); let collator_pair2 = CollatorPair::generate().0; @@ -972,19 +973,19 @@ fn collators_reject_declare_messages() { |mut test_harness| async move { let virtual_overseer = &mut test_harness.virtual_overseer; - let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let peer = test_state.current_group_validator_peer_ids()[0]; let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer(virtual_overseer, peer, Some(validator_id)).await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; overseer_send( virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer.clone(), + peer, Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( collator_pair2.public(), ParaId::from(5), @@ -1021,7 +1022,7 @@ where F: Future>, { let test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -1042,7 +1043,7 @@ where .into_iter() .zip(test_state.current_group_validator_peer_ids()) { - connect_peer(virtual_overseer, peer.clone(), Some(val.clone())).await; + connect_peer(virtual_overseer, peer, Some(val.clone())).await; } // We declare to the connected validators that we are a collator. @@ -1052,8 +1053,8 @@ where expect_declare_msg(virtual_overseer, &test_state, &peer_id).await; } - let validator_0 = test_state.current_group_validator_peer_ids()[0].clone(); - let validator_1 = test_state.current_group_validator_peer_ids()[1].clone(); + let validator_0 = test_state.current_group_validator_peer_ids()[0]; + let validator_1 = test_state.current_group_validator_peer_ids()[1]; // Send info about peer's view. 
send_peer_view_change(virtual_overseer, &validator_0, vec![test_state.relay_parent]) @@ -1149,7 +1150,7 @@ where #[test] fn connect_to_buffered_groups() { let mut test_state = TestState::default(); - let local_peer_id = test_state.local_peer_id.clone(); + let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); test_harness( @@ -1180,7 +1181,7 @@ fn connect_to_buffered_groups() { let head_a = test_state.relay_parent; for (val, peer) in group_a.iter().zip(&peers_a) { - connect_peer(&mut virtual_overseer, peer.clone(), Some(val.clone())).await; + connect_peer(&mut virtual_overseer, *peer, Some(val.clone())).await; } for peer_id in &peers_a { diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs index 47409e8d10f3..e921e6c38c3e 100644 --- a/node/network/collator-protocol/src/validator_side/tests.rs +++ b/node/network/collator-protocol/src/validator_side/tests.rs @@ -313,7 +313,7 @@ async fn connect_and_declare_collator( overseer_send( virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, ObservedRole::Full, CollationVersion::V1.into(), None, @@ -324,7 +324,7 @@ async fn connect_and_declare_collator( overseer_send( virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer.clone(), + peer, Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( collator.public(), para_id, @@ -376,13 +376,13 @@ fn act_on_advertisement() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, pair.clone(), test_state.chain_ids[0], ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent).await; assert_fetch_collation_request( &mut virtual_overseer, @@ -418,17 +418,17 @@ fn collator_reporting_works() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, test_state.collators[0].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; connect_and_declare_collator( &mut virtual_overseer, - peer_c.clone(), + peer_c, test_state.collators[1].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; @@ -477,7 +477,7 @@ fn collator_authentication_verification_works() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer_b.clone(), + peer_b, Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( test_state.collators[0].public(), test_state.chain_ids[0], @@ -529,22 +529,22 @@ fn fetch_one_collation_at_a_time() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, test_state.collators[0].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; connect_and_declare_collator( &mut virtual_overseer, - peer_c.clone(), + peer_c, test_state.collators[1].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; - advertise_collation(&mut virtual_overseer, peer_c.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent).await; let response_channel = assert_fetch_collation_request( &mut 
virtual_overseer, @@ -615,31 +615,31 @@ fn fetches_next_collation() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, test_state.collators[2].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; connect_and_declare_collator( &mut virtual_overseer, - peer_c.clone(), + peer_c, test_state.collators[3].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; connect_and_declare_collator( &mut virtual_overseer, - peer_d.clone(), + peer_d, test_state.collators[4].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), second).await; - advertise_collation(&mut virtual_overseer, peer_c.clone(), second).await; - advertise_collation(&mut virtual_overseer, peer_d.clone(), second).await; + advertise_collation(&mut virtual_overseer, peer_b, second).await; + advertise_collation(&mut virtual_overseer, peer_c, second).await; + advertise_collation(&mut virtual_overseer, peer_d, second).await; // Dropping the response channel should lead to fetching the second collation. assert_fetch_collation_request(&mut virtual_overseer, second, test_state.chain_ids[0]) @@ -708,9 +708,9 @@ fn reject_connection_to_next_group() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, test_state.collators[0].clone(), - test_state.chain_ids[1].clone(), // next, not current `para_id` + test_state.chain_ids[1], // next, not current `para_id` ) .await; @@ -757,22 +757,22 @@ fn fetch_next_collation_on_invalid_collation() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, test_state.collators[0].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; connect_and_declare_collator( &mut virtual_overseer, - peer_c.clone(), + peer_c, test_state.collators[1].clone(), - test_state.chain_ids[0].clone(), + test_state.chain_ids[0], ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; - advertise_collation(&mut virtual_overseer, peer_c.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_c, test_state.relay_parent).await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, @@ -854,12 +854,12 @@ fn inactive_disconnected() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, pair.clone(), test_state.chain_ids[0], ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b, test_state.relay_parent).await; assert_fetch_collation_request( &mut virtual_overseer, @@ -870,7 +870,7 @@ fn inactive_disconnected() { Delay::new(ACTIVITY_TIMEOUT * 3).await; - assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; + assert_collator_disconnect(&mut virtual_overseer, peer_b).await; virtual_overseer }); } @@ -905,7 +905,7 @@ fn activity_extends_life() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, pair.clone(), test_state.chain_ids[0], ) @@ -913,28 +913,28 @@ fn activity_extends_life() { Delay::new(ACTIVITY_TIMEOUT * 2 / 3).await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_a).await; + advertise_collation(&mut virtual_overseer, peer_b, hash_a).await; assert_fetch_collation_request(&mut virtual_overseer, hash_a, 
test_state.chain_ids[0]) .await; Delay::new(ACTIVITY_TIMEOUT * 2 / 3).await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_b).await; + advertise_collation(&mut virtual_overseer, peer_b, hash_b).await; assert_fetch_collation_request(&mut virtual_overseer, hash_b, test_state.chain_ids[0]) .await; Delay::new(ACTIVITY_TIMEOUT * 2 / 3).await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_c).await; + advertise_collation(&mut virtual_overseer, peer_b, hash_c).await; assert_fetch_collation_request(&mut virtual_overseer, hash_c, test_state.chain_ids[0]) .await; Delay::new(ACTIVITY_TIMEOUT * 3 / 2).await; - assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; + assert_collator_disconnect(&mut virtual_overseer, peer_b).await; virtual_overseer }); @@ -962,7 +962,7 @@ fn disconnect_if_no_declare() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, CollationVersion::V1.into(), None, @@ -970,7 +970,7 @@ fn disconnect_if_no_declare() { ) .await; - assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; + assert_collator_disconnect(&mut virtual_overseer, peer_b).await; virtual_overseer }) @@ -1000,7 +1000,7 @@ fn disconnect_if_wrong_declare() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, CollationVersion::V1.into(), None, @@ -1011,7 +1011,7 @@ fn disconnect_if_wrong_declare() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer_b.clone(), + peer_b, Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( pair.public(), ParaId::from(69), @@ -1031,7 +1031,7 @@ fn disconnect_if_wrong_declare() { } ); - assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; + assert_collator_disconnect(&mut virtual_overseer, peer_b).await; virtual_overseer }) @@ -1061,7 +1061,7 @@ fn delay_reputation_change() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, CollationVersion::V1.into(), None, @@ -1072,7 +1072,7 @@ fn delay_reputation_change() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer_b.clone(), + peer_b, Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( pair.public(), ParaId::from(69), @@ -1085,7 +1085,7 @@ fn delay_reputation_change() { overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer_b.clone(), + peer_b, Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( pair.public(), ParaId::from(69), @@ -1145,7 +1145,7 @@ fn view_change_clears_old_collators() { connect_and_declare_collator( &mut virtual_overseer, - peer_b.clone(), + peer_b, pair.clone(), test_state.chain_ids[0], ) @@ -1164,7 +1164,7 @@ fn view_change_clears_old_collators() { test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; + assert_collator_disconnect(&mut virtual_overseer, peer_b).await; virtual_overseer }) diff --git a/node/network/dispute-distribution/src/tests/mock.rs 
b/node/network/dispute-distribution/src/tests/mock.rs index d248328da0a7..e6a49f14c094 100644 --- a/node/network/dispute-distribution/src/tests/mock.rs +++ b/node/network/dispute-distribution/src/tests/mock.rs @@ -70,7 +70,7 @@ pub static ref MOCK_VALIDATORS_DISCOVERY_KEYS: HashMap = expected.into_iter().map(|(_,v)| v.into_iter()).flatten().collect(); - assert_eq!(validator_addrs.into_iter().map(|v| v.into_iter()).flatten().collect::>(), expected); + let expected: HashSet = expected.into_values().flat_map(|v| v.into_iter()).collect(); + assert_eq!(validator_addrs.into_iter().flat_map(|v| v.into_iter()).collect::>(), expected); assert_eq!(peer_set, PeerSet::Validation); } ); @@ -707,8 +707,8 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() { }) => { let mut expected = get_address_map(AUTHORITIES_WITHOUT_US.clone()).await; expected.remove(&bob); - let expected: HashSet = expected.into_iter().map(|(_,v)| v.into_iter()).flatten().collect(); - assert_eq!(validator_addrs.into_iter().map(|v| v.into_iter()).flatten().collect::>(), expected); + let expected: HashSet = expected.into_values().flat_map(|v| v.into_iter()).collect(); + assert_eq!(validator_addrs.into_iter().flat_map(|v| v.into_iter()).collect::>(), expected); assert_eq!(peer_set, PeerSet::Validation); } ); diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index 62167f77a1e0..affed80fce30 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -210,8 +210,8 @@ fn note_local_works() { let hash_b = CandidateHash([2; 32].into()); let mut per_peer_tracker = VcPerPeerTracker::default(); - per_peer_tracker.note_local(hash_a.clone()); - per_peer_tracker.note_local(hash_b.clone()); + per_peer_tracker.note_local(hash_a); + per_peer_tracker.note_local(hash_b); assert!(per_peer_tracker.local_observed.contains(&hash_a)); assert!(per_peer_tracker.local_observed.contains(&hash_b)); @@ -227,9 +227,9 @@ fn note_remote_works() { let hash_c = CandidateHash([3; 32].into()); let mut per_peer_tracker = VcPerPeerTracker::default(); - assert!(per_peer_tracker.note_remote(hash_a.clone())); - assert!(per_peer_tracker.note_remote(hash_b.clone())); - assert!(!per_peer_tracker.note_remote(hash_c.clone())); + assert!(per_peer_tracker.note_remote(hash_a)); + assert!(per_peer_tracker.note_remote(hash_b)); + assert!(!per_peer_tracker.note_remote(hash_c)); assert!(per_peer_tracker.remote_observed.contains(&hash_a)); assert!(per_peer_tracker.remote_observed.contains(&hash_b)); @@ -516,9 +516,9 @@ fn peer_view_update_sends_messages() { executor::block_on(async move { let mut topology = GridNeighbors::empty(); - topology.peers_x = HashSet::from_iter(vec![peer.clone()].into_iter()); + topology.peers_x = HashSet::from_iter(vec![peer].into_iter()); update_peer_view_and_maybe_send_unlocked( - peer.clone(), + peer, &topology, &mut peer_data, &mut ctx, @@ -553,7 +553,7 @@ fn peer_view_update_sends_messages() { // it will not change between runs of the program. 
for statement in active_head.statements_about(candidate_hash) { let message = handle.recv().await; - let expected_to = vec![peer.clone()]; + let expected_to = vec![peer]; let expected_payload = statement_message(hash_c, statement.statement.clone(), &Metrics::default()); @@ -596,14 +596,14 @@ fn circulated_statement_goes_to_all_peers_with_view() { let peer_data_from_view = |view: View| PeerData { view: view.clone(), - view_knowledge: view.iter().map(|v| (v.clone(), Default::default())).collect(), + view_knowledge: view.iter().map(|v| (*v, Default::default())).collect(), maybe_authority: None, }; let mut peer_data: HashMap<_, _> = vec![ - (peer_a.clone(), peer_data_from_view(peer_a_view)), - (peer_b.clone(), peer_data_from_view(peer_b_view)), - (peer_c.clone(), peer_data_from_view(peer_c_view)), + (peer_a, peer_data_from_view(peer_a_view)), + (peer_b, peer_data_from_view(peer_b_view)), + (peer_c, peer_data_from_view(peer_c_view)), ] .into_iter() .collect(); @@ -644,8 +644,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { let statement = StoredStatement { comparator: &comparator, statement: &statement }; let mut topology = GridNeighbors::empty(); - topology.peers_x = - HashSet::from_iter(vec![peer_a.clone(), peer_b.clone(), peer_c.clone()].into_iter()); + topology.peers_x = HashSet::from_iter(vec![peer_a, peer_b, peer_c].into_iter()); let needs_dependents = circulate_statement( RequiredRouting::GridXY, &topology, @@ -786,7 +785,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_a.clone(), + peer_a, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -799,7 +798,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -811,7 +810,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]), ), }) .await; @@ -819,7 +818,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]), ), }) .await; @@ -853,7 +852,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_a.clone(), + peer_a, Versioned::V1(protocol_v1::StatementDistributionMessage::Statement( hash_a, statement.clone().into(), @@ -887,7 +886,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { )), ) ) => { - assert_eq!(recipients, vec![peer_b.clone()]); + assert_eq!(recipients, vec![peer_b]); assert_eq!(r, hash_a); assert_eq!(s, statement.into()); } @@ -990,7 +989,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_a.clone(), + 
peer_a, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Alice.public().into()])), @@ -1003,7 +1002,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Bob.public().into()])), @@ -1015,7 +1014,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_c.clone(), + peer_c, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])), @@ -1027,7 +1026,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_bad.clone(), + peer_bad, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -1039,7 +1038,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]), ), }) .await; @@ -1047,21 +1046,21 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]), ), }) .await; handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_c.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_c, view![hash_a]), ), }) .await; handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_bad.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_bad, view![hash_a]), ), }) .await; @@ -1098,7 +1097,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_a.clone(), + peer_a, Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement( metadata.clone(), )), @@ -1136,7 +1135,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_c.clone(), + peer_c, Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement( metadata.clone(), )), @@ -1150,7 +1149,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_bad.clone(), + peer_bad, Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement( metadata.clone(), )), @@ -1486,7 +1485,7 @@ fn delay_reputation_changes() { .send(FromOrchestra::Communication { msg: 
StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_a.clone(), + peer_a, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Alice.public().into()])), @@ -1499,7 +1498,7 @@ fn delay_reputation_changes() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Bob.public().into()])), @@ -1511,7 +1510,7 @@ fn delay_reputation_changes() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_c.clone(), + peer_c, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])), @@ -1523,7 +1522,7 @@ fn delay_reputation_changes() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_bad.clone(), + peer_bad, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -1535,7 +1534,7 @@ fn delay_reputation_changes() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]), ), }) .await; @@ -1543,21 +1542,21 @@ fn delay_reputation_changes() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]), ), }) .await; handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_c.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_c, view![hash_a]), ), }) .await; handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_bad.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_bad, view![hash_a]), ), }) .await; @@ -1594,7 +1593,7 @@ fn delay_reputation_changes() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_a.clone(), + peer_a, Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement( metadata.clone(), )), @@ -1632,7 +1631,7 @@ fn delay_reputation_changes() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_c.clone(), + peer_c, Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement( metadata.clone(), )), @@ -1646,7 +1645,7 @@ fn delay_reputation_changes() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_bad.clone(), + peer_bad, Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement( metadata.clone(), )), @@ -1962,7 +1961,7 @@ fn share_prioritizes_backing_group() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_a.clone(), + peer_a, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Alice.public().into()])), @@ -1974,7 +1973,7 @@ fn share_prioritizes_backing_group() { 
.send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_b.clone(), + peer_b, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Bob.public().into()])), @@ -1986,7 +1985,7 @@ fn share_prioritizes_backing_group() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_c.clone(), + peer_c, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])), @@ -1998,7 +1997,7 @@ fn share_prioritizes_backing_group() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_bad.clone(), + peer_bad, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -2010,7 +2009,7 @@ fn share_prioritizes_backing_group() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_other_group.clone(), + peer_other_group, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Dave.public().into()])), @@ -2022,7 +2021,7 @@ fn share_prioritizes_backing_group() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]), ), }) .await; @@ -2030,28 +2029,28 @@ fn share_prioritizes_backing_group() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]), ), }) .await; handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_c.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_c, view![hash_a]), ), }) .await; handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_bad.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_bad, view![hash_a]), ), }) .await; handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_other_group.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_other_group, view![hash_a]), ), }) .await; @@ -2232,7 +2231,7 @@ fn peer_cant_flood_with_large_statements() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer_a.clone(), + peer_a, ObservedRole::Full, ValidationVersion::V1.into(), Some(HashSet::from([Sr25519Keyring::Alice.public().into()])), @@ -2244,7 +2243,7 @@ fn peer_cant_flood_with_large_statements() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![hash_a]), + NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]), ), }) .await; @@ -2280,7 +2279,7 @@ fn peer_cant_flood_with_large_statements() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_a.clone(), + peer_a, Versioned::V1( protocol_v1::StatementDistributionMessage::LargeStatement( metadata.clone(), @@ 
-2355,8 +2354,8 @@ fn handle_multiple_seconded_statements() { for _ in 0..MIN_GOSSIP_PEERS + 2 { all_peers.push(PeerId::random()); } - all_peers.push(peer_a.clone()); - all_peers.push(peer_b.clone()); + all_peers.push(peer_a); + all_peers.push(peer_b); let mut lucky_peers = all_peers.clone(); util::choose_random_subset_with_rng( @@ -2438,7 +2437,7 @@ fn handle_multiple_seconded_statements() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerConnected( - peer.clone(), + *peer, ObservedRole::Full, ValidationVersion::V1.into(), None, @@ -2449,7 +2448,7 @@ fn handle_multiple_seconded_statements() { handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerViewChange(peer.clone(), view![relay_parent_hash]), + NetworkBridgeEvent::PeerViewChange(*peer, view![relay_parent_hash]), ), }) .await; @@ -2470,13 +2469,13 @@ fn handle_multiple_seconded_statements() { .map(|i| { if i == 0 { TopologyPeerInfo { - peer_ids: vec![peer_a.clone()], + peer_ids: vec![peer_a], validator_index: ValidatorIndex(0), discovery_id: AuthorityPair::generate().0.public(), } } else if i == 1 { TopologyPeerInfo { - peer_ids: vec![peer_b.clone()], + peer_ids: vec![peer_b], validator_index: ValidatorIndex(1), discovery_id: AuthorityPair::generate().0.public(), } @@ -2489,7 +2488,7 @@ fn handle_multiple_seconded_statements() { } else if (i - 2) % dim == 0 { let lucky_index = ((i - 2) / dim) - 1; TopologyPeerInfo { - peer_ids: vec![lucky_peers[lucky_index].clone()], + peer_ids: vec![lucky_peers[lucky_index]], validator_index: ValidatorIndex(i as _), discovery_id: AuthorityPair::generate().0.public(), } @@ -2566,7 +2565,7 @@ fn handle_multiple_seconded_statements() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_a.clone(), + peer_a, Versioned::V1(protocol_v1::StatementDistributionMessage::Statement( relay_parent_hash, statement.clone().into(), @@ -2618,7 +2617,7 @@ fn handle_multiple_seconded_statements() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_b.clone(), + peer_b, Versioned::V1(protocol_v1::StatementDistributionMessage::Statement( relay_parent_hash, statement.clone().into(), @@ -2667,7 +2666,7 @@ fn handle_multiple_seconded_statements() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_a.clone(), + peer_a, Versioned::V1(protocol_v1::StatementDistributionMessage::Statement( relay_parent_hash, statement.clone().into(), @@ -2718,7 +2717,7 @@ fn handle_multiple_seconded_statements() { .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( NetworkBridgeEvent::PeerMessage( - peer_b.clone(), + peer_b, Versioned::V1(protocol_v1::StatementDistributionMessage::Statement( relay_parent_hash, statement.clone().into(), diff --git a/node/service/src/tests.rs b/node/service/src/tests.rs index 95d5765bad45..86119662d9bc 100644 --- a/node/service/src/tests.rs +++ b/node/service/src/tests.rs @@ -87,7 +87,7 @@ fn test_harness>( None, ); - let target_hash = case_vars.target_block.clone(); + let target_hash = case_vars.target_block; let selection_process = async move { let best = select_relay_chain .finality_target_with_longest_chain(target_hash, None) @@ -106,8 +106,7 @@ fn test_harness>( let _overseer = 
test_fut.await; }, selection_process, - )) - .1; + )); } async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages { @@ -241,7 +240,7 @@ impl ChainBuilder { builder } - pub fn add_block<'a>(&'a mut self, hash: Hash, parent_hash: Hash, number: u32) -> &'a mut Self { + pub fn add_block(&mut self, hash: Hash, parent_hash: Hash, number: u32) -> &mut Self { assert!(number != 0, "cannot add duplicate genesis block"); assert!(hash != Self::GENESIS_HASH, "cannot add block with genesis hash"); assert!( @@ -252,12 +251,7 @@ impl ChainBuilder { self.add_block_inner(hash, parent_hash, number) } - fn add_block_inner<'a>( - &'a mut self, - hash: Hash, - parent_hash: Hash, - number: u32, - ) -> &'a mut Self { + fn add_block_inner(&mut self, hash: Hash, parent_hash: Hash, number: u32) -> &mut Self { let header = ChainBuilder::make_header(parent_hash, number); assert!( self.0.blocks_by_hash.insert(hash, header).is_none(), @@ -360,7 +354,7 @@ async fn test_skeleton( )) => { assert_eq!(target_block_hash, target_hash, "TestIntegrity: target hashes always match. qed"); - tx.send(best_chain_containing_block.clone()).unwrap(); + tx.send(best_chain_containing_block).unwrap(); } ); diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index 4fc3f82eb4a9..1313b7b90469 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -210,7 +210,7 @@ pub fn run_validator_node( .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); - let peer_id = network.local_peer_id().clone(); + let peer_id = network.local_peer_id(); let addr = MultiaddrWithPeerId { multiaddr, peer_id }; PolkadotTestNode { task_manager, client, overseer_handle, addr, rpc_handlers } @@ -242,7 +242,7 @@ pub fn run_collator_node( .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); - let peer_id = network.local_peer_id().clone(); + let peer_id = network.local_peer_id(); let addr = MultiaddrWithPeerId { multiaddr, peer_id }; PolkadotTestNode { task_manager, client, overseer_handle, addr, rpc_handlers } @@ -273,7 +273,7 @@ impl PolkadotTestNode { ) -> Result<(), RpcTransactionError> { let sudo = SudoCall::sudo { call: Box::new(call.into()) }; - let extrinsic = construct_extrinsic(&*self.client, sudo, caller, nonce); + let extrinsic = construct_extrinsic(&self.client, sudo, caller, nonce); self.rpc_handlers.send_transaction(extrinsic.into()).await.map(drop) } @@ -283,7 +283,7 @@ impl PolkadotTestNode { function: impl Into, caller: Sr25519Keyring, ) -> Result { - let extrinsic = construct_extrinsic(&*self.client, function, caller, 0); + let extrinsic = construct_extrinsic(&self.client, function, caller, 0); self.rpc_handlers.send_transaction(extrinsic.into()).await } diff --git a/runtime/common/src/auctions.rs b/runtime/common/src/auctions.rs index 901c9c27da28..9c2bb04b9c8e 100644 --- a/runtime/common/src/auctions.rs +++ b/runtime/common/src/auctions.rs @@ -793,7 +793,7 @@ mod tests { if leases.contains_key(&(para, period)) { return Err(LeaseError::AlreadyLeased) } - leases.insert((para, period), LeaseData { leaser: leaser.clone(), amount }); + leases.insert((para, period), LeaseData { leaser: *leaser, amount }); } Ok(()) }) @@ -1391,7 +1391,7 @@ mod tests { (1, 2.into(), 53, SlotRange::TwoTwo), (5, 3.into(), 1, SlotRange::ThreeThree), ]; - assert_eq!(Auctions::calculate_winners(winning.clone()), winners); + 
assert_eq!(Auctions::calculate_winners(winning), winners); winning[SlotRange::ZeroOne as u8 as usize] = Some((4, 10.into(), 3)); let winners = vec![ @@ -1399,11 +1399,11 @@ mod tests { (1, 2.into(), 53, SlotRange::TwoTwo), (5, 3.into(), 1, SlotRange::ThreeThree), ]; - assert_eq!(Auctions::calculate_winners(winning.clone()), winners); + assert_eq!(Auctions::calculate_winners(winning), winners); winning[SlotRange::ZeroThree as u8 as usize] = Some((1, 100.into(), 100)); let winners = vec![(1, 100.into(), 100, SlotRange::ZeroThree)]; - assert_eq!(Auctions::calculate_winners(winning.clone()), winners); + assert_eq!(Auctions::calculate_winners(winning), winners); } #[test] diff --git a/runtime/common/src/crowdloan/mod.rs b/runtime/common/src/crowdloan/mod.rs index 1db046c52701..0303808e0747 100644 --- a/runtime/common/src/crowdloan/mod.rs +++ b/runtime/common/src/crowdloan/mod.rs @@ -970,16 +970,16 @@ mod tests { ENDING_PERIOD.with(|p| *p.borrow_mut() = ending_period); } fn auction() -> Option<(u64, u64)> { - AUCTION.with(|p| p.borrow().clone()) + AUCTION.with(|p| *p.borrow()) } fn ending_period() -> u64 { - ENDING_PERIOD.with(|p| p.borrow().clone()) + ENDING_PERIOD.with(|p| *p.borrow()) } fn bids() -> Vec { BIDS_PLACED.with(|p| p.borrow().clone()) } fn vrf_delay() -> u64 { - VRF_DELAY.with(|p| p.borrow().clone()) + VRF_DELAY.with(|p| *p.borrow()) } fn set_vrf_delay(delay: u64) { VRF_DELAY.with(|p| *p.borrow_mut() = delay); diff --git a/runtime/common/src/xcm_sender.rs b/runtime/common/src/xcm_sender.rs index 3573ec3dc42b..ff529143c509 100644 --- a/runtime/common/src/xcm_sender.rs +++ b/runtime/common/src/xcm_sender.rs @@ -140,26 +140,24 @@ mod tests { // F * (B + msg_length * M) // message_length = 1 - let result: u128 = TestFeeTracker::get_fee_factor(id.clone()).saturating_mul_int(b + m); + let result: u128 = TestFeeTracker::get_fee_factor(id).saturating_mul_int(b + m); assert_eq!( - TestExponentialPrice::price_for_parachain_delivery(id.clone(), &Xcm(vec![])), + TestExponentialPrice::price_for_parachain_delivery(id, &Xcm(vec![])), (FeeAssetId::get(), result).into() ); // message size = 2 - let result: u128 = - TestFeeTracker::get_fee_factor(id.clone()).saturating_mul_int(b + (2 * m)); + let result: u128 = TestFeeTracker::get_fee_factor(id).saturating_mul_int(b + (2 * m)); assert_eq!( - TestExponentialPrice::price_for_parachain_delivery(id.clone(), &Xcm(vec![ClearOrigin])), + TestExponentialPrice::price_for_parachain_delivery(id, &Xcm(vec![ClearOrigin])), (FeeAssetId::get(), result).into() ); // message size = 4 - let result: u128 = - TestFeeTracker::get_fee_factor(id.clone()).saturating_mul_int(b + (4 * m)); + let result: u128 = TestFeeTracker::get_fee_factor(id).saturating_mul_int(b + (4 * m)); assert_eq!( TestExponentialPrice::price_for_parachain_delivery( - id.clone(), + id, &Xcm(vec![SetAppendix(Xcm(vec![ClearOrigin]))]) ), (FeeAssetId::get(), result).into() diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs index 892e934e6dfc..3f95b2087e6c 100644 --- a/runtime/parachains/src/builder.rs +++ b/runtime/parachains/src/builder.rs @@ -377,8 +377,8 @@ impl BenchBuilder { fn signing_context(&self) -> SigningContext { SigningContext { - parent_hash: Self::header(self.block_number.clone()).hash(), - session_index: self.session.clone(), + parent_hash: Self::header(self.block_number).hash(), + session_index: self.session, } } @@ -408,7 +408,7 @@ impl BenchBuilder { } let block_number = BlockNumberFor::::from(block); - let header = Self::header(block_number.clone()); + 
let header = Self::header(block_number); frame_system::Pallet::::reset_events(); frame_system::Pallet::::initialize( @@ -464,13 +464,13 @@ impl BenchBuilder { for (seed, _) in concluding_cores.iter() { // make sure the candidates that will be concluding are marked as pending availability. - let (para_id, core_idx, group_idx) = self.create_indexes(seed.clone()); + let (para_id, core_idx, group_idx) = self.create_indexes(*seed); Self::add_availability( para_id, core_idx, group_idx, Self::validator_availability_votes_yes(validators.len()), - CandidateHash(H256::from(byte32_slice_from(seed.clone()))), + CandidateHash(H256::from(byte32_slice_from(*seed))), ); } @@ -496,11 +496,11 @@ impl BenchBuilder { .iter() .map(|(seed, num_votes)| { assert!(*num_votes <= validators.len() as u32); - let (para_id, _core_idx, group_idx) = self.create_indexes(seed.clone()); + let (para_id, _core_idx, group_idx) = self.create_indexes(*seed); // This generates a pair and adds it to the keystore, returning just the public. let collator_public = CollatorId::generate_pair(None); - let header = Self::header(self.block_number.clone()); + let header = Self::header(self.block_number); let relay_parent = header.hash(); let head_data = Self::mock_head_data(); let persisted_validation_data_hash = PersistedValidationData:: { @@ -563,7 +563,7 @@ impl BenchBuilder { let public = validators.get(*val_idx).unwrap(); let sig = UncheckedSigned::::benchmark_sign( public, - CompactStatement::Valid(candidate_hash.clone()), + CompactStatement::Valid(candidate_hash), &self.signing_context(), *val_idx, ) @@ -632,14 +632,14 @@ impl BenchBuilder { } else { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) }; - let data = dispute_statement.payload_data(candidate_hash.clone(), session); + let data = dispute_statement.payload_data(candidate_hash, session); let statement_sig = validator_public.sign(&data).unwrap(); (dispute_statement, ValidatorIndex(validator_index), statement_sig) }) .collect(); - DisputeStatementSet { candidate_hash: candidate_hash.clone(), session, statements } + DisputeStatementSet { candidate_hash: candidate_hash, session, statements } }) .collect() } @@ -702,7 +702,7 @@ impl BenchBuilder { bitfields, backed_candidates, disputes, - parent_header: Self::header(builder.block_number.clone()), + parent_header: Self::header(builder.block_number), }, _session: target_session, _block_number: builder.block_number, diff --git a/runtime/parachains/src/disputes/tests.rs b/runtime/parachains/src/disputes/tests.rs index acdba343274c..0757084084f6 100644 --- a/runtime/parachains/src/disputes/tests.rs +++ b/runtime/parachains/src/disputes/tests.rs @@ -377,7 +377,6 @@ fn test_initializer_on_new_session() { let mock_genesis_config = MockGenesisConfig { configuration: crate::configuration::GenesisConfig { config: HostConfiguration { dispute_period, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -386,13 +385,13 @@ fn test_initializer_on_new_session() { let v0 = ::Pair::generate().0; let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); - Pallet::::note_included(0, candidate_hash.clone(), 0); - Pallet::::note_included(1, candidate_hash.clone(), 1); - Pallet::::note_included(2, candidate_hash.clone(), 2); - Pallet::::note_included(3, candidate_hash.clone(), 3); - Pallet::::note_included(4, candidate_hash.clone(), 4); - Pallet::::note_included(5, candidate_hash.clone(), 5); - Pallet::::note_included(6, candidate_hash.clone(), 5); + Pallet::::note_included(0, candidate_hash, 0); + 
Pallet::::note_included(1, candidate_hash, 1); + Pallet::::note_included(2, candidate_hash, 2); + Pallet::::note_included(3, candidate_hash, 3); + Pallet::::note_included(4, candidate_hash, 4); + Pallet::::note_included(5, candidate_hash, 5); + Pallet::::note_included(6, candidate_hash, 5); run_to_block(7, |b| { // a new session at each block @@ -465,7 +464,7 @@ fn test_provide_multi_dispute_is_providing() { let inclusion_parent = sp_core::H256::repeat_byte(0xff); let session = 1; let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( @@ -481,12 +480,8 @@ fn test_provide_multi_dispute_is_providing() { DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v1.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ], @@ -499,7 +494,7 @@ fn test_provide_multi_dispute_is_providing() { .map(CheckedDisputeStatementSet::unchecked_from_unchecked) .collect() ), - vec![(1, candidate_hash.clone())], + vec![(1, candidate_hash)], ); }) } @@ -528,31 +523,23 @@ fn test_disputes_with_missing_backing_votes_are_rejected() { let session = 1; let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), ValidatorIndex(0), v0.sign( - &ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: true, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v1.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ], @@ -590,31 +577,23 @@ fn test_freeze_on_note_included() { // v0 votes for 3 let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 3, statements: vec![ ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(0), v0.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 3, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 3 } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v1.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 3, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 3 } + .signing_payload(), ), ), ( @@ -636,7 +615,7 @@ fn test_freeze_on_note_included() { ) .is_ok()); - Pallet::::note_included(3, candidate_hash.clone(), 3); + Pallet::::note_included(3, candidate_hash, 3); assert_eq!(Frozen::::get(), Some(2)); }); } @@ -663,31 +642,23 @@ fn test_freeze_provided_against_supermajority_for_included() { // v0 votes for 3 let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(0), v0.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: 
candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v1.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( @@ -702,7 +673,7 @@ fn test_freeze_provided_against_supermajority_for_included() { ], }]; - Pallet::::note_included(3, candidate_hash.clone(), 3); + Pallet::::note_included(3, candidate_hash, 3); assert!(Pallet::::process_checked_multi_dispute_data( &stmts .into_iter() @@ -744,43 +715,31 @@ fn test_freeze_provided_against_byzantine_threshold_for_included() { // A byzantine threshold of INVALID let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(0), v0.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v1.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(2), v2.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( @@ -796,7 +755,7 @@ fn test_freeze_provided_against_byzantine_threshold_for_included() { }]; // Include the candidate and import the votes - Pallet::::note_included(3, candidate_hash.clone(), 3); + Pallet::::note_included(3, candidate_hash, 3); assert!(Pallet::::process_checked_multi_dispute_data( &stmts .into_iter() @@ -813,43 +772,31 @@ fn test_freeze_provided_against_byzantine_threshold_for_included() { // And generate enough votes to reach supermajority of invalid votes let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(3), v3.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(4), v4.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(5), v5.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ], @@ -922,31 +869,23 @@ mod 
unconfirmed_disputes { // v0 votes for 4, v1 votes against 4. DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 4, statements: vec![ ( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), ValidatorIndex(0), v0.sign( - &ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session: 4, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: true, candidate_hash, session: 4 } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(3), v1.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 4, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 4 } + .signing_payload(), ), ), ], @@ -1037,7 +976,7 @@ fn test_provide_multi_dispute_success_and_other() { // v0 and v1 vote for 3, v6 votes against let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( @@ -1053,24 +992,16 @@ fn test_provide_multi_dispute_success_and_other() { DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(2), v6.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), ValidatorIndex(3), v1.sign( - &ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session: 3, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: true, candidate_hash, session: 3 } + .signing_payload(), ), ), ], @@ -1080,29 +1011,25 @@ fn test_provide_multi_dispute_success_and_other() { assert_ok!( Pallet::::process_checked_multi_dispute_data(&stmts), - vec![(3, candidate_hash.clone())], + vec![(3, candidate_hash)], ); // v3 votes against 3 and for 5, v2 and v6 vote against 5. 
let stmts = vec![ DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 3, statements: vec![( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(5), v3.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 3, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 3 } + .signing_payload(), ), )], }, DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 5, statements: vec![ ( @@ -1118,24 +1045,16 @@ fn test_provide_multi_dispute_success_and_other() { DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(6), v2.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 5, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 5 } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(2), v6.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 5, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 5 } + .signing_payload(), ), ), ], @@ -1145,23 +1064,19 @@ fn test_provide_multi_dispute_success_and_other() { let stmts = filter_dispute_set(stmts); assert_ok!( Pallet::::process_checked_multi_dispute_data(&stmts), - vec![(5, candidate_hash.clone())], + vec![(5, candidate_hash)], ); // v2 votes for 3 let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 3, statements: vec![( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), ValidatorIndex(6), v2.sign( - &ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session: 3, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: true, candidate_hash, session: 3 } + .signing_payload(), ), )], }]; @@ -1171,74 +1086,54 @@ fn test_provide_multi_dispute_success_and_other() { let stmts = vec![ // 0, 4, and 5 vote against 5 DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 5, statements: vec![ ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(0), v0.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 5, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 5 } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v4.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 5, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 5 } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(4), v5.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 5, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session: 5 } + .signing_payload(), ), ), ], }, // 4 and 5 vote for 3 DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 3, statements: vec![ ( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), ValidatorIndex(1), v4.sign( - &ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - 
session: 3, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: true, candidate_hash, session: 3 } + .signing_payload(), ), ), ( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), ValidatorIndex(4), v5.sign( - &ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session: 3, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: true, candidate_hash, session: 3 } + .signing_payload(), ), ), ], @@ -1252,7 +1147,7 @@ fn test_provide_multi_dispute_success_and_other() { vec![ ( 5, - candidate_hash.clone(), + candidate_hash, DisputeState { validators_for: bitvec![u8, BitOrderLsb0; 0, 0, 0, 0, 0, 1, 0], validators_against: bitvec![u8, BitOrderLsb0; 1, 1, 1, 0, 1, 0, 1], @@ -1262,7 +1157,7 @@ fn test_provide_multi_dispute_success_and_other() { ), ( 3, - candidate_hash.clone(), + candidate_hash, DisputeState { validators_for: bitvec![u8, BitOrderLsb0; 1, 1, 0, 1, 1, 0, 1], validators_against: bitvec![u8, BitOrderLsb0; 0, 0, 1, 0, 0, 1, 0], @@ -1273,9 +1168,9 @@ fn test_provide_multi_dispute_success_and_other() { ] ); - assert!(!Pallet::::concluded_invalid(3, candidate_hash.clone())); - assert!(!Pallet::::concluded_invalid(4, candidate_hash.clone())); - assert!(Pallet::::concluded_invalid(5, candidate_hash.clone())); + assert!(!Pallet::::concluded_invalid(3, candidate_hash)); + assert!(!Pallet::::concluded_invalid(4, candidate_hash)); + assert!(Pallet::::concluded_invalid(5, candidate_hash)); // Ensure the `reward_validator` function was correctly called assert_eq!( @@ -1372,7 +1267,7 @@ fn test_punish_post_conclusion() { let session = 3; let stmts = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( @@ -1388,60 +1283,40 @@ fn test_punish_post_conclusion() { DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v4.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(2), v6.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(6), v2.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(4), v5.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(5), v3.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( @@ -1469,7 +1344,7 @@ fn test_punish_post_conclusion() { // someone reveals 3 backing vote, 6 votes against let stmts = vec![DisputeStatementSet { - 
candidate_hash: candidate_hash.clone(), + candidate_hash, session, statements: vec![ ( @@ -1485,12 +1360,8 @@ fn test_punish_post_conclusion() { DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(6), v2.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ], @@ -1600,37 +1471,28 @@ fn test_check_signature() { let wrong_inclusion_parent = sp_core::H256::repeat_byte(4); let statement_1 = DisputeStatement::Valid(ValidDisputeStatementKind::Explicit); - let statement_2 = DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded( - inclusion_parent.clone(), - )); - let wrong_statement_2 = DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded( - wrong_inclusion_parent.clone(), - )); + let statement_2 = + DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded(inclusion_parent)); + let wrong_statement_2 = + DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded(wrong_inclusion_parent)); let statement_3 = - DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(inclusion_parent.clone())); - let wrong_statement_3 = DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid( - wrong_inclusion_parent.clone(), - )); + DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(inclusion_parent)); + let wrong_statement_3 = + DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(wrong_inclusion_parent)); let statement_4 = DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking); let statement_5 = DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit); - let signed_1 = validator_id.sign( - &ExplicitDisputeStatement { valid: true, candidate_hash: candidate_hash.clone(), session } - .signing_payload(), - ); - let signed_2 = - validator_id.sign(&CompactStatement::Seconded(candidate_hash.clone()).signing_payload( - &SigningContext { session_index: session, parent_hash: inclusion_parent.clone() }, - )); - let signed_3 = - validator_id.sign(&CompactStatement::Valid(candidate_hash.clone()).signing_payload( - &SigningContext { session_index: session, parent_hash: inclusion_parent.clone() }, - )); - let signed_4 = - validator_id.sign(&ApprovalVote(candidate_hash.clone()).signing_payload(session)); + let signed_1 = validator_id + .sign(&ExplicitDisputeStatement { valid: true, candidate_hash, session }.signing_payload()); + let signed_2 = validator_id.sign(&CompactStatement::Seconded(candidate_hash).signing_payload( + &SigningContext { session_index: session, parent_hash: inclusion_parent }, + )); + let signed_3 = validator_id.sign(&CompactStatement::Valid(candidate_hash).signing_payload( + &SigningContext { session_index: session, parent_hash: inclusion_parent }, + )); + let signed_4 = validator_id.sign(&ApprovalVote(candidate_hash).signing_payload(session)); let signed_5 = validator_id.sign( - &ExplicitDisputeStatement { valid: false, candidate_hash: candidate_hash.clone(), session } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session }.signing_payload(), ); assert!(check_signature( @@ -2007,9 +1869,8 @@ fn deduplication_and_sorting_works() { c_hash: &CandidateHash, valid, session| { - let payload = - ExplicitDisputeStatement { valid, candidate_hash: c_hash.clone(), session } - .signing_payload(); + let payload = ExplicitDisputeStatement { valid, candidate_hash: *c_hash, session } 
+ .signing_payload(); let sig = validator.sign(&payload); (DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), vidx, sig.clone()) }; @@ -2031,24 +1892,24 @@ fn deduplication_and_sorting_works() { let mut disputes = vec![ DisputeStatementSet { - candidate_hash: candidate_hash_b.clone(), + candidate_hash: candidate_hash_b, session: 2, statements: vec![explicit_triple_b.clone(), explicit_triple_b_bad.clone()], }, // same session as above DisputeStatementSet { - candidate_hash: candidate_hash_c.clone(), + candidate_hash: candidate_hash_c, session: 2, statements: vec![explicit_triple_c, explicit_triple_c_bad], }, // the duplicate set DisputeStatementSet { - candidate_hash: candidate_hash_b.clone(), + candidate_hash: candidate_hash_b, session: 2, statements: vec![explicit_triple_b.clone(), explicit_triple_b_bad.clone()], }, DisputeStatementSet { - candidate_hash: candidate_hash_a.clone(), + candidate_hash: candidate_hash_a, session: 1, statements: vec![explicit_triple_a, explicit_triple_a_bad], }, @@ -2116,19 +1977,11 @@ fn filter_removes_duplicates_within_set() { let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); - let payload = ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session: 1, - } - .signing_payload(); + let payload = + ExplicitDisputeStatement { valid: true, candidate_hash, session: 1 }.signing_payload(); - let payload_against = ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session: 1, - } - .signing_payload(); + let payload_against = + ExplicitDisputeStatement { valid: false, candidate_hash, session: 1 }.signing_payload(); let sig_a = v0.sign(&payload); let sig_b = v0.sign(&payload); @@ -2136,7 +1989,7 @@ fn filter_removes_duplicates_within_set() { let sig_d = v1.sign(&payload_against); let statements = DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 1, statements: vec![ ( @@ -2172,7 +2025,7 @@ fn filter_removes_duplicates_within_set() { assert_eq!( statements, Some(CheckedDisputeStatementSet::unchecked_from_unchecked(DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 1, statements: vec![ ( @@ -2217,7 +2070,7 @@ fn filter_bad_signatures_correctly_detects_single_sided() { let candidate_hash_a = CandidateHash(sp_core::H256::repeat_byte(1)); let payload = |c_hash: &CandidateHash, valid| { - ExplicitDisputeStatement { valid, candidate_hash: c_hash.clone(), session: 1 } + ExplicitDisputeStatement { valid, candidate_hash: *c_hash, session: 1 } .signing_payload() }; @@ -2228,7 +2081,7 @@ fn filter_bad_signatures_correctly_detects_single_sided() { let sig_1 = v1.sign(&payload_a_bad); let statements = vec![DisputeStatementSet { - candidate_hash: candidate_hash_a.clone(), + candidate_hash: candidate_hash_a, session: 1, statements: vec![ ( @@ -2262,17 +2115,13 @@ fn filter_removes_session_out_of_bounds() { let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); - let payload = ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session: 1, - } - .signing_payload(); + let payload = + ExplicitDisputeStatement { valid: true, candidate_hash, session: 1 }.signing_payload(); let sig_a = v0.sign(&payload); let statements = vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + candidate_hash, session: 100, statements: vec![( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), @@ -2297,7 +2146,6 @@ fn filter_removes_concluded_ancient() { 
dispute_post_conclusion_acceptance_period, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -2335,26 +2183,20 @@ fn filter_removes_concluded_ancient() { }, ); - let payload_a = ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash_a.clone(), - session: 1, - } - .signing_payload(); + let payload_a = + ExplicitDisputeStatement { valid: true, candidate_hash: candidate_hash_a, session: 1 } + .signing_payload(); - let payload_b = ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash_b.clone(), - session: 1, - } - .signing_payload(); + let payload_b = + ExplicitDisputeStatement { valid: true, candidate_hash: candidate_hash_b, session: 1 } + .signing_payload(); let sig_a = v0.sign(&payload_a); let sig_b = v0.sign(&payload_b); let statements = vec![ DisputeStatementSet { - candidate_hash: candidate_hash_a.clone(), + candidate_hash: candidate_hash_a, session: 1, statements: vec![( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), @@ -2363,7 +2205,7 @@ fn filter_removes_concluded_ancient() { )], }, DisputeStatementSet { - candidate_hash: candidate_hash_b.clone(), + candidate_hash: candidate_hash_b, session: 1, statements: vec![( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), @@ -2378,7 +2220,7 @@ fn filter_removes_concluded_ancient() { assert_eq!( statements, vec![CheckedDisputeStatementSet::unchecked_from_unchecked(DisputeStatementSet { - candidate_hash: candidate_hash_b.clone(), + candidate_hash: candidate_hash_b, session: 1, statements: vec![( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), @@ -2408,19 +2250,13 @@ fn filter_removes_duplicate_statements_sets() { let candidate_hash_a = CandidateHash(sp_core::H256::repeat_byte(1)); - let payload = ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash_a.clone(), - session: 1, - } - .signing_payload(); + let payload = + ExplicitDisputeStatement { valid: true, candidate_hash: candidate_hash_a, session: 1 } + .signing_payload(); - let payload_against = ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash_a.clone(), - session: 1, - } - .signing_payload(); + let payload_against = + ExplicitDisputeStatement { valid: false, candidate_hash: candidate_hash_a, session: 1 } + .signing_payload(); let sig_a = v0.sign(&payload); let sig_a_against = v1.sign(&payload_against); @@ -2440,12 +2276,12 @@ fn filter_removes_duplicate_statements_sets() { let mut sets = vec![ DisputeStatementSet { - candidate_hash: candidate_hash_a.clone(), + candidate_hash: candidate_hash_a, session: 1, statements: statements.clone(), }, DisputeStatementSet { - candidate_hash: candidate_hash_a.clone(), + candidate_hash: candidate_hash_a, session: 1, statements: statements.clone(), }, @@ -2459,11 +2295,7 @@ fn filter_removes_duplicate_statements_sets() { assert_eq!( sets, - vec![DisputeStatementSet { - candidate_hash: candidate_hash_a.clone(), - session: 1, - statements, - }] + vec![DisputeStatementSet { candidate_hash: candidate_hash_a, session: 1, statements }] ); }) } @@ -2480,17 +2312,14 @@ fn filter_ignores_single_sided() { let candidate_hash_a = CandidateHash(sp_core::H256::repeat_byte(1)); - let payload = ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash_a.clone(), - session: 1, - } - .signing_payload(); + let payload = + ExplicitDisputeStatement { valid: true, candidate_hash: candidate_hash_a, session: 1 } + .signing_payload(); let sig_a = v0.sign(&payload); let statements = vec![DisputeStatementSet { - 
candidate_hash: candidate_hash_a.clone(), + candidate_hash: candidate_hash_a, session: 1, statements: vec![( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), @@ -2517,17 +2346,14 @@ fn import_ignores_single_sided() { let candidate_hash_a = CandidateHash(sp_core::H256::repeat_byte(1)); - let payload = ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash_a.clone(), - session: 1, - } - .signing_payload(); + let payload = + ExplicitDisputeStatement { valid: true, candidate_hash: candidate_hash_a, session: 1 } + .signing_payload(); let sig_a = v0.sign(&payload); let statements = vec![DisputeStatementSet { - candidate_hash: candidate_hash_a.clone(), + candidate_hash: candidate_hash_a, session: 1, statements: vec![( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), diff --git a/runtime/parachains/src/inclusion/tests.rs b/runtime/parachains/src/inclusion/tests.rs index 59f5e9414d0a..3b4d7a7df357 100644 --- a/runtime/parachains/src/inclusion/tests.rs +++ b/runtime/parachains/src/inclusion/tests.rs @@ -68,10 +68,7 @@ pub(crate) fn genesis_config(paras: Vec<(ParaId, ParaKind)>) -> MockGenesisConfi .collect(), ..Default::default() }, - configuration: configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: configuration::GenesisConfig { config: default_config() }, ..Default::default() } } diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 4570bb2b13bd..6089fe2ba3b8 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -259,7 +259,7 @@ impl ParaPastCodeMeta { // of the para. #[cfg(test)] fn most_recent_change(&self) -> Option { - self.upgrade_times.last().map(|x| x.expected_at.clone()) + self.upgrade_times.last().map(|x| x.expected_at) } // prunes all code upgrade logs occurring at or before `max`. 
diff --git a/runtime/parachains/src/paras/tests.rs b/runtime/parachains/src/paras/tests.rs index 4a3be6d7d50e..a9b51fe2b45e 100644 --- a/runtime/parachains/src/paras/tests.rs +++ b/runtime/parachains/src/paras/tests.rs @@ -280,7 +280,6 @@ fn para_past_code_pruning_in_initialize() { paras: GenesisConfig { paras, ..Default::default() }, configuration: crate::configuration::GenesisConfig { config: HostConfiguration { code_retention_period, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -333,7 +332,6 @@ fn note_new_head_sets_head() { paras: GenesisConfig { paras, ..Default::default() }, configuration: crate::configuration::GenesisConfig { config: HostConfiguration { code_retention_period, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -375,7 +373,6 @@ fn note_past_code_sets_up_pruning_correctly() { paras: GenesisConfig { paras, ..Default::default() }, configuration: crate::configuration::GenesisConfig { config: HostConfiguration { code_retention_period, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -424,7 +421,6 @@ fn code_upgrade_applied_after_delay() { validation_upgrade_cooldown, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -537,7 +533,6 @@ fn code_upgrade_applied_after_delay_even_when_late() { validation_upgrade_cooldown, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -620,7 +615,6 @@ fn submit_code_change_when_not_allowed_is_err() { validation_upgrade_cooldown, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -691,7 +685,6 @@ fn upgrade_restriction_elapsed_doesnt_mean_can_upgrade() { validation_upgrade_cooldown, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -757,7 +750,6 @@ fn full_parachain_cleanup_storage() { thread_availability_period: 1, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -1013,7 +1005,6 @@ fn code_hash_at_returns_up_to_end_of_code_retention_period() { validation_upgrade_delay, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -1104,7 +1095,6 @@ fn pvf_check_coalescing_onboarding_and_upgrade() { paras: GenesisConfig { paras, ..Default::default() }, configuration: crate::configuration::GenesisConfig { config: HostConfiguration { validation_upgrade_delay, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -1169,7 +1159,6 @@ fn pvf_check_onboarding_reject_on_expiry() { let genesis_config = MockGenesisConfig { configuration: crate::configuration::GenesisConfig { config: HostConfiguration { pvf_voting_ttl, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -1569,7 +1558,6 @@ fn add_trusted_validation_code_insta_approval() { minimum_validation_upgrade_delay, ..Default::default() }, - ..Default::default() }, ..Default::default() }; @@ -1611,7 +1599,6 @@ fn add_trusted_validation_code_enacts_existing_pvf_vote() { minimum_validation_upgrade_delay, ..Default::default() }, - ..Default::default() }, ..Default::default() }; diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index faf52b555ba3..4de12bcc91b7 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -155,43 +155,31 @@ mod enter { let generate_votes = |session: u32, candidate_hash: CandidateHash| { // v0 votes for 3 vec![DisputeStatementSet { - candidate_hash: candidate_hash.clone(), + 
candidate_hash, session, statements: vec![ ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(0), v0.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), ValidatorIndex(1), v1.sign( - &ExplicitDisputeStatement { - valid: false, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload(), ), ), ( DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), ValidatorIndex(1), v1.sign( - &ExplicitDisputeStatement { - valid: true, - candidate_hash: candidate_hash.clone(), - session, - } - .signing_payload(), + &ExplicitDisputeStatement { valid: true, candidate_hash, session } + .signing_payload(), ), ), ], @@ -202,7 +190,7 @@ mod enter { }; let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); - let statements = generate_votes(3, candidate_hash.clone()); + let statements = generate_votes(3, candidate_hash); set_scrapable_on_chain_disputes::(3, statements); assert_matches!(pallet::Pallet::::on_chain_votes(), Some(ScrapedOnChainVotes { session, @@ -221,7 +209,7 @@ mod enter { }); let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(2)); - let statements = generate_votes(7, candidate_hash.clone()); + let statements = generate_votes(7, candidate_hash); set_scrapable_on_chain_disputes::(7, statements); assert_matches!(pallet::Pallet::::on_chain_votes(), Some(ScrapedOnChainVotes { session, @@ -1330,7 +1318,7 @@ mod sanitizers { let mut set = std::collections::HashSet::new(); for (idx, backed_candidate) in backed_candidates.iter().enumerate() { if idx & 0x01 == 0 { - set.insert(backed_candidate.hash().clone()); + set.insert(backed_candidate.hash()); } } set diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs index c4830f4bf253..cc2aee357231 100644 --- a/runtime/parachains/src/scheduler/tests.rs +++ b/runtime/parachains/src/scheduler/tests.rs @@ -116,10 +116,7 @@ fn default_config() -> HostConfiguration { #[test] fn add_parathread_claim_works() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -200,7 +197,7 @@ fn cannot_add_claim_when_no_parathread_cores() { config }; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config, ..Default::default() }, + configuration: crate::configuration::GenesisConfig { config }, ..Default::default() }; @@ -224,10 +221,7 @@ fn cannot_add_claim_when_no_parathread_cores() { #[test] fn session_change_prunes_cores_beyond_retries_and_those_from_non_live_parathreads() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; let max_parathread_retries = default_config().parathread_retries; @@ -326,10 +320,7 @@ fn session_change_prunes_cores_beyond_retries_and_those_from_non_live_parathread #[test] fn session_change_shuffles_validators() { let genesis_config = 
MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -384,10 +375,7 @@ fn session_change_takes_only_max_per_core() { }; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: config.clone() }, ..Default::default() }; @@ -432,10 +420,7 @@ fn session_change_takes_only_max_per_core() { #[test] fn schedule_schedules() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -557,10 +542,7 @@ fn schedule_schedules() { #[test] fn schedule_schedules_including_just_freed() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -733,10 +715,7 @@ fn schedule_schedules_including_just_freed() { #[test] fn schedule_clears_availability_cores() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -839,10 +818,7 @@ fn schedule_rotates_groups() { let parathread_cores = config.parathread_cores; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: config.clone() }, ..Default::default() }; @@ -911,10 +887,7 @@ fn parathread_claims_are_pruned_after_retries() { let max_retries = default_config().parathread_retries; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -959,10 +932,7 @@ fn parathread_claims_are_pruned_after_retries() { #[test] fn availability_predicate_works() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -1067,10 +1037,7 @@ fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { config.parathread_cores = 1; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: config.clone() }, ..Default::default() }; @@ -1139,10 +1106,7 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { config.parathread_cores = 1; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: config.clone() }, ..Default::default() }; @@ -1217,10 +1181,7 @@ fn 
next_up_on_available_is_parachain_always() { config.parathread_cores = 0; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: config.clone() }, ..Default::default() }; @@ -1271,10 +1232,7 @@ fn next_up_on_time_out_is_parachain_always() { config.parathread_cores = 0; let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: config.clone() }, ..Default::default() }; @@ -1322,10 +1280,7 @@ fn next_up_on_time_out_is_parachain_always() { #[test] fn session_change_requires_reschedule_dropping_removed_paras() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; @@ -1399,10 +1354,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { #[test] fn parathread_claims_are_pruned_after_deregistration() { let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: crate::configuration::GenesisConfig { config: default_config() }, ..Default::default() }; diff --git a/runtime/parachains/src/session_info/tests.rs b/runtime/parachains/src/session_info/tests.rs index 63226fb7cf81..c4475526d58f 100644 --- a/runtime/parachains/src/session_info/tests.rs +++ b/runtime/parachains/src/session_info/tests.rs @@ -71,10 +71,7 @@ fn default_config() -> HostConfiguration { fn genesis_config() -> MockGenesisConfig { MockGenesisConfig { - configuration: configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, + configuration: configuration::GenesisConfig { config: default_config() }, ..Default::default() } } diff --git a/runtime/parachains/src/ump_tests.rs b/runtime/parachains/src/ump_tests.rs index 424aa2a4d032..22aee31043a2 100644 --- a/runtime/parachains/src/ump_tests.rs +++ b/runtime/parachains/src/ump_tests.rs @@ -523,21 +523,21 @@ fn overweight_queue_works() { assert_last_events( [ pallet_message_queue::Event::::Processed { - id: hash_1.clone(), + id: hash_1, origin: Ump(UmpQueueId::Para(para_a)), weight_used: Weight::from_parts(301, 301), success: true, } .into(), pallet_message_queue::Event::::OverweightEnqueued { - id: hash_2.clone(), + id: hash_2, origin: Ump(UmpQueueId::Para(para_a)), page_index: 0, message_index: 1, } .into(), pallet_message_queue::Event::::OverweightEnqueued { - id: hash_3.clone(), + id: hash_3, origin: Ump(UmpQueueId::Para(para_a)), page_index: 0, message_index: 2, diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index ea629f189dc8..963df0aa0303 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -114,5 +114,6 @@ cargo-clippy: - .docker-env - .test-refs script: + - echo $RUSTFLAGS - cargo version && cargo clippy --version - - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy --locked --all-targets + - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy -q --locked --all-targets --workspace diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index 9aa445becce0..161a06ef161a 100644 --- a/statement-table/src/generic.rs +++ 
b/statement-table/src/generic.rs @@ -723,7 +723,7 @@ mod tests { // authority 2 votes for validity on 1's candidate. let bad_validity_vote = SignedStatement { - statement: Statement::Valid(candidate_a_digest.clone()), + statement: Statement::Valid(candidate_a_digest), signature: Signature(2), sender: AuthorityId(2), }; @@ -794,7 +794,7 @@ mod tests { assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let extra_vote = SignedStatement { - statement: Statement::Valid(candidate_digest.clone()), + statement: Statement::Valid(candidate_digest), signature: Signature(1), sender: AuthorityId(1), }; @@ -864,7 +864,7 @@ mod tests { assert!(table.attested_candidate(&candidate_digest, &context).is_none()); let vote = SignedStatement { - statement: Statement::Valid(candidate_digest.clone()), + statement: Statement::Valid(candidate_digest), signature: Signature(2), sender: AuthorityId(2), }; @@ -923,7 +923,7 @@ mod tests { assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let vote = SignedStatement { - statement: Statement::Valid(candidate_digest.clone()), + statement: Statement::Valid(candidate_digest), signature: Signature(2), sender: AuthorityId(2), }; diff --git a/xcm/pallet-xcm/src/tests.rs b/xcm/pallet-xcm/src/tests.rs index f42eb987876a..6ff9f1d893c8 100644 --- a/xcm/pallet-xcm/src/tests.rs +++ b/xcm/pallet-xcm/src/tests.rs @@ -46,10 +46,8 @@ fn report_outcome_notify_works() { (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); - let mut message = Xcm(vec![TransferAsset { - assets: (Here, SEND_AMOUNT).into(), - beneficiary: sender.clone(), - }]); + let mut message = + Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); let call = pallet_test_notifier::Call::notification_received { query_id: 0, response: Default::default(), @@ -71,7 +69,7 @@ fn report_outcome_notify_works() { query_id: 0, max_weight: Weight::from_parts(1_000_000, 1_000_000), })])), - TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender.clone() }, + TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }, ]) ); let querier: MultiLocation = Here.into(); @@ -79,7 +77,7 @@ fn report_outcome_notify_works() { responder: MultiLocation::from(Parachain(PARA_ID)).into(), maybe_notify: Some((4, 2)), timeout: 100, - maybe_match_querier: Some(querier.clone().into()), + maybe_match_querier: Some(querier.into()), }; assert_eq!(crate::Queries::::iter().collect::>(), vec![(0, status)]); @@ -123,10 +121,8 @@ fn report_outcome_works() { (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); - let mut message = Xcm(vec![TransferAsset { - assets: (Here, SEND_AMOUNT).into(), - beneficiary: sender.clone(), - }]); + let mut message = + Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); new_test_ext_with_balances(balances).execute_with(|| { XcmPallet::report_outcome(&mut message, Parachain(PARA_ID).into_location(), 100).unwrap(); assert_eq!( @@ -137,7 +133,7 @@ fn report_outcome_works() { query_id: 0, max_weight: Weight::zero(), })])), - TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender.clone() }, + TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }, ]) ); let querier: MultiLocation = Here.into(); @@ -145,7 +141,7 @@ fn report_outcome_works() { responder: 
MultiLocation::from(Parachain(PARA_ID)).into(), maybe_notify: None, timeout: 100, - maybe_match_querier: Some(querier.clone().into()), + maybe_match_querier: Some(querier.into()), }; assert_eq!(crate::Queries::::iter().collect::>(), vec![(0, status)]); @@ -187,13 +183,13 @@ fn custom_querier_works() { let querier: MultiLocation = (Parent, AccountId32 { network: None, id: ALICE.into() }).into(); - let r = TestNotifier::prepare_new_query(RuntimeOrigin::signed(ALICE), querier.clone()); + let r = TestNotifier::prepare_new_query(RuntimeOrigin::signed(ALICE), querier); assert_eq!(r, Ok(())); let status = QueryStatus::Pending { responder: MultiLocation::from(AccountId32 { network: None, id: ALICE.into() }).into(), maybe_notify: None, timeout: 100, - maybe_match_querier: Some(querier.clone().into()), + maybe_match_querier: Some(querier.into()), }; assert_eq!(crate::Queries::::iter().collect::>(), vec![(0, status)]); @@ -218,7 +214,7 @@ fn custom_querier_works() { RuntimeEvent::XcmPallet(crate::Event::InvalidQuerier { origin: AccountId32 { network: None, id: ALICE.into() }.into(), query_id: 0, - expected_querier: querier.clone(), + expected_querier: querier, maybe_actual_querier: None, }), ); @@ -244,7 +240,7 @@ fn custom_querier_works() { RuntimeEvent::XcmPallet(crate::Event::InvalidQuerier { origin: AccountId32 { network: None, id: ALICE.into() }.into(), query_id: 0, - expected_querier: querier.clone(), + expected_querier: querier, maybe_actual_querier: Some(MultiLocation::here()), }), ); @@ -293,7 +289,7 @@ fn send_works() { ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), ClearOrigin, buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: sender.clone() }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, ]); let versioned_dest = Box::new(RelayLocation::get().into()); @@ -303,7 +299,7 @@ fn send_works() { versioned_dest, versioned_message )); - let sent_message = Xcm(Some(DescendOrigin(sender.clone().try_into().unwrap())) + let sent_message = Xcm(Some(DescendOrigin(sender.try_into().unwrap())) .into_iter() .chain(message.0.clone().into_iter()) .collect()); @@ -337,7 +333,7 @@ fn send_fails_when_xcm_router_blocks() { let message = Xcm(vec![ ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: sender.clone() }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, ]); assert_noop!( XcmPallet::send( @@ -367,7 +363,7 @@ fn teleport_assets_works() { assert_ok!(XcmPallet::teleport_assets( RuntimeOrigin::signed(ALICE), Box::new(RelayLocation::get().into()), - Box::new(dest.clone().into()), + Box::new(dest.into()), Box::new((Here, SEND_AMOUNT).into()), 0, )); @@ -410,7 +406,7 @@ fn limited_teleport_assets_works() { assert_ok!(XcmPallet::limited_teleport_assets( RuntimeOrigin::signed(ALICE), Box::new(RelayLocation::get().into()), - Box::new(dest.clone().into()), + Box::new(dest.into()), Box::new((Here, SEND_AMOUNT).into()), 0, WeightLimit::Limited(Weight::from_parts(5000, 5000)), @@ -454,7 +450,7 @@ fn unlimited_teleport_assets_works() { assert_ok!(XcmPallet::limited_teleport_assets( RuntimeOrigin::signed(ALICE), Box::new(RelayLocation::get().into()), - Box::new(dest.clone().into()), + Box::new(dest.into()), Box::new((Here, SEND_AMOUNT).into()), 0, WeightLimit::Unlimited, @@ -496,7 +492,7 @@ fn reserve_transfer_assets_works() { assert_ok!(XcmPallet::reserve_transfer_assets( RuntimeOrigin::signed(ALICE), 
Box::new(Parachain(PARA_ID).into()), - Box::new(dest.clone().into()), + Box::new(dest.into()), Box::new((Here, SEND_AMOUNT).into()), 0, )); @@ -543,7 +539,7 @@ fn limited_reserve_transfer_assets_works() { assert_ok!(XcmPallet::limited_reserve_transfer_assets( RuntimeOrigin::signed(ALICE), Box::new(Parachain(PARA_ID).into()), - Box::new(dest.clone().into()), + Box::new(dest.into()), Box::new((Here, SEND_AMOUNT).into()), 0, WeightLimit::Limited(Weight::from_parts(5000, 5000)), @@ -591,7 +587,7 @@ fn unlimited_reserve_transfer_assets_works() { assert_ok!(XcmPallet::limited_reserve_transfer_assets( RuntimeOrigin::signed(ALICE), Box::new(Parachain(PARA_ID).into()), - Box::new(dest.clone().into()), + Box::new(dest.into()), Box::new((Here, SEND_AMOUNT).into()), 0, WeightLimit::Unlimited, @@ -670,7 +666,7 @@ fn trapped_assets_can_be_claimed() { // This will make an error. Trap(0), // This would succeed, but we never get to it. - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, ]))), weight )); @@ -678,12 +674,12 @@ fn trapped_assets_can_be_claimed() { Junction::AccountId32 { network: None, id: ALICE.into() }.into(); let trapped = AssetTraps::::iter().collect::>(); let vma = VersionedMultiAssets::from(MultiAssets::from((Here, SEND_AMOUNT))); - let hash = BlakeTwo256::hash_of(&(source.clone(), vma.clone())); + let hash = BlakeTwo256::hash_of(&(source, vma.clone())); assert_eq!( last_events(2), vec![ RuntimeEvent::XcmPallet(crate::Event::AssetsTrapped { - hash: hash.clone(), + hash, origin: source, assets: vma }), @@ -704,7 +700,7 @@ fn trapped_assets_can_be_claimed() { Box::new(VersionedXcm::from(Xcm(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, ]))), weight )); @@ -742,25 +738,22 @@ fn basic_subscription_works() { let remote: MultiLocation = Parachain(1000).into(); assert_ok!(XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()), + Box::new(remote.into()), )); assert_eq!( Queries::::iter().collect::>(), - vec![( - 0, - QueryStatus::VersionNotifier { origin: remote.clone().into(), is_active: false } - )] + vec![(0, QueryStatus::VersionNotifier { origin: remote.into(), is_active: false })] ); assert_eq!( VersionNotifiers::::iter().collect::>(), - vec![(XCM_VERSION, remote.clone().into(), 0)] + vec![(XCM_VERSION, remote.into(), 0)] ); assert_eq!( take_sent_xcm(), vec![( - remote.clone(), + remote, Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]), ),] ); @@ -790,27 +783,27 @@ fn subscriptions_increment_id() { let remote: MultiLocation = Parachain(1000).into(); assert_ok!(XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()), + Box::new(remote.into()), )); let remote2: MultiLocation = Parachain(1001).into(); assert_ok!(XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote2.clone().into()), + Box::new(remote2.into()), )); assert_eq!( take_sent_xcm(), vec![ ( - remote.clone(), + remote, Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]), ), ( - remote2.clone(), + remote2, Xcm(vec![SubscribeVersion { query_id: 1, max_response_weight: Weight::zero() @@ -827,12 +820,12 @@ fn double_subscription_fails() { let remote: 
MultiLocation = Parachain(1000).into(); assert_ok!(XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()), + Box::new(remote.into()), )); assert_noop!( XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()) + Box::new(remote.into()) ), Error::::AlreadySubscribed, ); @@ -845,16 +838,16 @@ fn unsubscribe_works() { let remote: MultiLocation = Parachain(1000).into(); assert_ok!(XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()), + Box::new(remote.into()), )); assert_ok!(XcmPallet::force_unsubscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()) + Box::new(remote.into()) )); assert_noop!( XcmPallet::force_unsubscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()) + Box::new(remote.into()) ), Error::::NoSubscription, ); @@ -863,13 +856,13 @@ fn unsubscribe_works() { take_sent_xcm(), vec![ ( - remote.clone(), + remote, Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]), ), - (remote.clone(), Xcm(vec![UnsubscribeVersion]),), + (remote, Xcm(vec![UnsubscribeVersion]),), ] ); }); @@ -886,7 +879,7 @@ fn subscription_side_works() { let message = Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]); let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote.clone(), message, hash, weight); + let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); assert_eq!(r, Outcome::Complete(weight)); let instr = QueryResponse { @@ -895,7 +888,7 @@ fn subscription_side_works() { response: Response::Version(1), querier: None, }; - assert_eq!(take_sent_xcm(), vec![(remote.clone(), Xcm(vec![instr]))]); + assert_eq!(take_sent_xcm(), vec![(remote, Xcm(vec![instr]))]); // A runtime upgrade which doesn't alter the version sends no notifications. CurrentMigration::::put(VersionMigrationStage::default()); @@ -914,7 +907,7 @@ fn subscription_side_works() { response: Response::Version(2), querier: None, }; - assert_eq!(take_sent_xcm(), vec![(remote.clone(), Xcm(vec![instr]))]); + assert_eq!(take_sent_xcm(), vec![(remote, Xcm(vec![instr]))]); }); } @@ -1004,7 +997,7 @@ fn subscriber_side_subscription_works() { let remote: MultiLocation = Parachain(1000).into(); assert_ok!(XcmPallet::force_subscribe_version_notify( RuntimeOrigin::root(), - Box::new(remote.clone().into()), + Box::new(remote.into()), )); take_sent_xcm(); @@ -1021,7 +1014,7 @@ fn subscriber_side_subscription_works() { }, ]); let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote.clone(), message, hash, weight); + let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); assert_eq!(r, Outcome::Complete(weight)); assert_eq!(take_sent_xcm(), vec![]); @@ -1039,7 +1032,7 @@ fn subscriber_side_subscription_works() { }, ]); let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote.clone(), message, hash, weight); + let r = XcmExecutor::::execute_xcm(remote, message, hash, weight); assert_eq!(r, Outcome::Complete(weight)); // This message can now be sent to remote as it's v2. 
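As an aside on what the subscription hunks around here exercise (`subscription_side_works`, `subscriber_side_subscription_works` above, and `auto_subscription_works` just below): wrapping a message for a destination whose XCM version is not yet known returns `Err(())`, the destination lands in `VersionDiscoveryQueue`, and a later `on_initialize` drains that queue by sending `SubscribeVersion` requests. The following is a deliberately simplified Rust model of that flow written for illustration only; `Location` and `VersionTracker` are invented stand-ins and this is not pallet-xcm's actual code.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Location(u32); // stand-in for `MultiLocation`

#[derive(Default)]
struct VersionTracker {
    known: HashMap<Location, u32>, // destination -> negotiated XCM version
    discovery_queue: Vec<Location>,
}

impl VersionTracker {
    // Fails when the destination's version is unknown and records it for discovery.
    fn wrap_version(&mut self, dest: Location) -> Result<u32, ()> {
        match self.known.get(&dest) {
            Some(v) => Ok(*v),
            None => {
                self.discovery_queue.push(dest);
                Err(())
            },
        }
    }

    // Drains the queue; the real pallet would send a `SubscribeVersion` to each entry.
    fn on_initialize(&mut self) -> Vec<Location> {
        std::mem::take(&mut self.discovery_queue)
    }
}

fn main() {
    let mut tracker = VersionTracker::default();
    let remote = Location(1000);
    assert_eq!(tracker.wrap_version(remote), Err(())); // version unknown, queued
    assert_eq!(tracker.on_initialize(), vec![remote]); // subscription would be sent here
    tracker.known.insert(remote, 3);                    // a version response arrives
    assert_eq!(tracker.wrap_version(remote), Ok(3));    // wrapping now succeeds
}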
@@ -1068,7 +1061,7 @@ fn auto_subscription_works() { ); assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v3.clone()), Err(())); - let expected = vec![(remote_v2.clone().into(), 2)]; + let expected = vec![(remote_v2.into(), 2)]; assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); assert_eq!( @@ -1077,14 +1070,14 @@ fn auto_subscription_works() { ); assert_eq!(XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), Err(())); - let expected = vec![(remote_v2.clone().into(), 2), (remote_v3.clone().into(), 2)]; + let expected = vec![(remote_v2.into(), 2), (remote_v3.into(), 2)]; assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); XcmPallet::on_initialize(1); assert_eq!( take_sent_xcm(), vec![( - remote_v3.clone(), + remote_v3, Xcm(vec![SubscribeVersion { query_id: 0, max_response_weight: Weight::zero() }]), )] ); @@ -1102,7 +1095,7 @@ fn auto_subscription_works() { }, ]); let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote_v3.clone(), message, hash, weight); + let r = XcmExecutor::::execute_xcm(remote_v3, message, hash, weight); assert_eq!(r, Outcome::Complete(weight)); // V2 messages can be sent to remote_v3 under XCM v3. @@ -1120,7 +1113,7 @@ fn auto_subscription_works() { assert_eq!( take_sent_xcm(), vec![( - remote_v2.clone(), + remote_v2, Xcm(vec![SubscribeVersion { query_id: 1, max_response_weight: Weight::zero() }]), )] ); @@ -1138,7 +1131,7 @@ fn auto_subscription_works() { }, ]); let hash = fake_message_hash(&message); - let r = XcmExecutor::::execute_xcm(remote_v2.clone(), message, hash, weight); + let r = XcmExecutor::::execute_xcm(remote_v2, message, hash, weight); assert_eq!(r, Outcome::Complete(weight)); // v3 messages cannot be sent to remote_v2... diff --git a/xcm/xcm-builder/src/asset_conversion.rs b/xcm/xcm-builder/src/asset_conversion.rs index 2fe26e8cd1e3..a246436a9d6d 100644 --- a/xcm/xcm-builder/src/asset_conversion.rs +++ b/xcm/xcm-builder/src/asset_conversion.rs @@ -286,11 +286,11 @@ mod tests { struct ClassInstanceIdConverter; impl MaybeEquivalence for ClassInstanceIdConverter { fn convert(value: &AssetInstance) -> Option { - value.clone().try_into().ok() + (*value).try_into().ok() } fn convert_back(value: &ClassInstanceId) -> Option { - Some(AssetInstance::from(value.clone())) + Some(AssetInstance::from(*value)) } } diff --git a/xcm/xcm-builder/src/tests/assets.rs b/xcm/xcm-builder/src/tests/assets.rs index dbcb731a1bda..e1d61a9d1c6d 100644 --- a/xcm/xcm-builder/src/tests/assets.rs +++ b/xcm/xcm-builder/src/tests/assets.rs @@ -147,10 +147,7 @@ fn reserve_transfer_should_work() { let message = Xcm(vec![TransferReserveAsset { assets: (Here, 100u128).into(), dest: Parachain(2).into(), - xcm: Xcm::<()>(vec![DepositAsset { - assets: AllCounted(1).into(), - beneficiary: three.clone(), - }]), + xcm: Xcm::<()>(vec![DepositAsset { assets: AllCounted(1).into(), beneficiary: three }]), }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( diff --git a/xcm/xcm-builder/src/tests/bridging/mod.rs b/xcm/xcm-builder/src/tests/bridging/mod.rs index 2b5de62975ee..45630dbfc248 100644 --- a/xcm/xcm-builder/src/tests/bridging/mod.rs +++ b/xcm/xcm-builder/src/tests/bridging/mod.rs @@ -155,7 +155,7 @@ fn price( d: &InteriorMultiLocation, m: &Xcm<()>, ) -> Result { - Ok(validate_export::(n, c, s.clone(), d.clone(), m.clone())?.1) + Ok(validate_export::(n, c, *s, *d, m.clone())?.1) } fn deliver( @@ -205,7 +205,7 @@ impl, Remote: Get, RemoteExporter: ExportXcm> S // though it is `Remote`. 
ExecutorUniversalLocation::set(Remote::get()); let origin = Local::get().relative_to(&Remote::get()); - AllowUnpaidFrom::set(vec![origin.clone()]); + AllowUnpaidFrom::set(vec![origin]); set_exporter_override(price::, deliver::); // The we execute it: let mut id = fake_id(); @@ -255,7 +255,7 @@ impl, Remote: Get, RemoteExporter: ExportXcm> S // though it is `Remote`. ExecutorUniversalLocation::set(Remote::get()); let origin = Local::get().relative_to(&Remote::get()); - AllowPaidFrom::set(vec![origin.clone()]); + AllowPaidFrom::set(vec![origin]); set_exporter_override(price::, deliver::); // Then we execute it: let mut id = fake_id(); diff --git a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs index 6870413c38d5..7593ea5f17c0 100644 --- a/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs +++ b/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs @@ -113,7 +113,7 @@ fn sending_to_bridged_chain_without_funds_fails() { let dest: MultiLocation = (Parent, Parent, Remote::get()).into(); // Routing won't work if we don't have enough funds. assert_eq!( - send_xcm::(dest.clone(), Xcm(vec![Trap(1)])), + send_xcm::(dest, Xcm(vec![Trap(1)])), Err(SendError::Transport("Error executing")), ); } @@ -188,7 +188,7 @@ fn sending_to_parachain_of_bridged_chain_without_funds_fails() { let dest: MultiLocation = (Parent, Parent, Remote::get(), Parachain(100)).into(); // Routing won't work if we don't have enough funds. assert_eq!( - send_xcm::(dest.clone(), Xcm(vec![Trap(1)])), + send_xcm::(dest, Xcm(vec![Trap(1)])), Err(SendError::Transport("Error executing")), ); } diff --git a/xcm/xcm-builder/src/tests/mock.rs b/xcm/xcm-builder/src/tests/mock.rs index aea780b84367..9be034596f43 100644 --- a/xcm/xcm-builder/src/tests/mock.rs +++ b/xcm/xcm-builder/src/tests/mock.rs @@ -254,7 +254,7 @@ impl TransactAsset for TestAssetTransactor { who: &MultiLocation, _context: &XcmContext, ) -> Result<(), XcmError> { - add_asset(who.clone(), what.clone()); + add_asset(*who, what.clone()); Ok(()) } @@ -515,7 +515,7 @@ pub fn disallow_unlock( pub fn unlock_allowed(unlocker: &MultiLocation, asset: &MultiAsset, owner: &MultiLocation) -> bool { ALLOWED_UNLOCKS.with(|l| { l.borrow_mut() - .get(&(owner.clone(), unlocker.clone())) + .get(&(*owner, *unlocker)) .map_or(false, |x| x.contains_asset(asset)) }) } @@ -550,7 +550,7 @@ pub fn request_unlock_allowed( ) -> bool { ALLOWED_REQUEST_UNLOCKS.with(|l| { l.borrow_mut() - .get(&(owner.clone(), locker.clone())) + .get(&(*owner, *locker)) .map_or(false, |x| x.contains_asset(asset)) }) } @@ -560,11 +560,11 @@ impl Enact for TestTicket { fn enact(self) -> Result<(), LockError> { match &self.0 { LockTraceItem::Lock { unlocker, asset, owner } => - allow_unlock(unlocker.clone(), asset.clone(), owner.clone()), + allow_unlock(*unlocker, asset.clone(), *owner), LockTraceItem::Unlock { unlocker, asset, owner } => - disallow_unlock(unlocker.clone(), asset.clone(), owner.clone()), + disallow_unlock(*unlocker, asset.clone(), *owner), LockTraceItem::Reduce { locker, asset, owner } => - disallow_request_unlock(locker.clone(), asset.clone(), owner.clone()), + disallow_request_unlock(*locker, asset.clone(), *owner), _ => {}, } LOCK_TRACE.with(move |l| l.borrow_mut().push(self.0)); @@ -583,7 +583,7 @@ impl AssetLock for TestAssetLock { asset: MultiAsset, owner: MultiLocation, ) -> Result { - ensure!(assets(owner.clone()).contains_asset(&asset), LockError::AssetNotOwned); + 
ensure!(assets(owner).contains_asset(&asset), LockError::AssetNotOwned); Ok(TestTicket(LockTraceItem::Lock { unlocker, asset, owner })) } @@ -601,7 +601,7 @@ impl AssetLock for TestAssetLock { asset: MultiAsset, owner: MultiLocation, ) -> Result<(), LockError> { - allow_request_unlock(locker.clone(), asset.clone(), owner.clone()); + allow_request_unlock(locker, asset.clone(), owner); let item = LockTraceItem::Note { locker, asset, owner }; LOCK_TRACE.with(move |l| l.borrow_mut().push(item)); Ok(()) diff --git a/xcm/xcm-builder/src/tests/transacting.rs b/xcm/xcm-builder/src/tests/transacting.rs index ccb2c23d19fd..743ad7039f7f 100644 --- a/xcm/xcm-builder/src/tests/transacting.rs +++ b/xcm/xcm-builder/src/tests/transacting.rs @@ -66,11 +66,11 @@ fn transacting_should_refund_weight() { #[test] fn paid_transacting_should_refund_payment_for_unused_weight() { let one: MultiLocation = AccountIndex64 { index: 1, network: None }.into(); - AllowPaidFrom::set(vec![one.clone()]); + AllowPaidFrom::set(vec![one]); add_asset(AccountIndex64 { index: 1, network: None }, (Parent, 200u128)); WeightPrice::set((Parent.into(), 1_000_000_000_000, 1024 * 1024)); - let origin = one.clone(); + let origin = one; let fees = (Parent, 200u128).into(); let message = Xcm::(vec![ WithdrawAsset((Parent, 200u128).into()), // enough for 200 units of weight. @@ -84,7 +84,7 @@ fn paid_transacting_should_refund_payment_for_unused_weight() { .into(), }, RefundSurplus, - DepositAsset { assets: AllCounted(1).into(), beneficiary: one.clone() }, + DepositAsset { assets: AllCounted(1).into(), beneficiary: one }, ]); let hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(100, 100); diff --git a/xcm/xcm-builder/src/tests/version_subscriptions.rs b/xcm/xcm-builder/src/tests/version_subscriptions.rs index 434c92202c83..44ab7d34c51b 100644 --- a/xcm/xcm-builder/src/tests/version_subscriptions.rs +++ b/xcm/xcm-builder/src/tests/version_subscriptions.rs @@ -59,7 +59,7 @@ fn version_subscription_instruction_should_work() { let hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(20, 20); let r = XcmExecutor::::execute_xcm_in_credit( - origin.clone(), + origin, message, hash, weight_limit, @@ -124,7 +124,7 @@ fn version_unsubscription_instruction_should_work() { let hash = fake_message_hash(&message); let weight_limit = Weight::from_parts(20, 20); let r = XcmExecutor::::execute_xcm_in_credit( - origin.clone(), + origin, message, hash, weight_limit, diff --git a/xcm/xcm-simulator/fuzzer/src/parachain.rs b/xcm/xcm-simulator/fuzzer/src/parachain.rs index 26379ed8dc8e..3d0b1c82f691 100644 --- a/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -238,7 +238,7 @@ pub mod mock_msg_queue { Ok(xcm) => { let location = MultiLocation::new(1, X1(Parachain(sender.into()))); match T::XcmExecutor::execute_xcm(location, xcm, message_hash, max_weight) { - Outcome::Error(e) => (Err(e.clone()), Event::Fail(Some(hash), e)), + Outcome::Error(e) => (Err(e), Event::Fail(Some(hash), e)), Outcome::Complete(w) => (Ok(w), Event::Success(Some(hash))), // As far as the caller is concerned, this was dispatched without error, so // we just report the weight used. 
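Nearly all of the test hunks above, and the few that continue below, are two mechanical cleanups rather than behavioural changes: `.clone()` calls are dropped where the value can simply be copied, which only compiles if types such as `CandidateHash` and `MultiLocation` (and the other hashes and ids touched here) implement `Copy` in the updated primitives, and `..Default::default()` is dropped from `configuration::GenesisConfig` initializers, which only compiles if `config` is now its sole field. Both are inferences from the diff; the type changes themselves come from the companion PRs, not this patch. A minimal sketch with stand-in types, just to show why the edits are purely mechanical:

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct CandidateHash([u8; 32]); // stand-in; assume the real type now derives `Copy`

#[derive(Default)]
struct GenesisConfig {
    config: u32, // stand-in for `HostConfiguration`; assume `config` is the only field left
}

fn main() {
    let hash = CandidateHash([1; 32]);
    let in_set = hash;        // implicit copy: `.clone()` is redundant for `Copy` types
    assert_eq!(in_set, hash); // the original remains usable afterwards

    // With a single field there is nothing left for `..Default::default()` to fill in,
    // so `GenesisConfig { config, ..Default::default() }` shrinks to `GenesisConfig { config }`.
    let config = 7;
    let _genesis = GenesisConfig { config };
}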
@@ -262,7 +262,7 @@ pub mod mock_msg_queue { let _ = XcmpMessageFormat::decode(&mut data_ref) .expect("Simulator encodes with versioned xcm format; qed"); - let mut remaining_fragments = &data_ref[..]; + let mut remaining_fragments = data_ref; while !remaining_fragments.is_empty() { if let Ok(xcm) = VersionedXcm::::decode(&mut remaining_fragments) From d73503cc2351ffec047a4912ecdb3e1eab5f46aa Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 16 Aug 2023 16:26:41 +0200 Subject: [PATCH 22/27] [Polkadot] 28 days as conviction voting period (#7595) * Use 28 days for conviction vote locking Signed-off-by: Oliver Tale-Yazdi * Remove unused dependency profile Signed-off-by: Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi --- Cargo.toml | 1 - runtime/polkadot/src/governance/mod.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 44cf027e35b3..dc42123a9f20 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -166,7 +166,6 @@ crossbeam-deque = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } ed25519-dalek = { opt-level = 3 } -flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hash-db = { opt-level = 3 } hashbrown = { opt-level = 3 } diff --git a/runtime/polkadot/src/governance/mod.rs b/runtime/polkadot/src/governance/mod.rs index 4cd9eeacd845..870d143dbaf0 100644 --- a/runtime/polkadot/src/governance/mod.rs +++ b/runtime/polkadot/src/governance/mod.rs @@ -35,7 +35,7 @@ mod tracks; pub use tracks::TracksInfo; parameter_types! { - pub const VoteLockingPeriod: BlockNumber = 7 * DAYS; + pub const VoteLockingPeriod: BlockNumber = prod_or_fast!(28 * DAYS, 1); } impl pallet_conviction_voting::Config for Runtime { From 91e127953c9bb51f6bc2f8779376fc672e995c30 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Wed, 16 Aug 2023 17:31:57 -0300 Subject: [PATCH 23/27] bump zombienet to `v1.3.65` (#7631) * bump zombienet version * remove workaround, zombienet collators cmd bug fixed in latest version * add env var to run in ci * add env var to run in ci --- .gitlab-ci.yml | 6 +++--- .../test-parachains/adder/collator/src/main.rs | 6 +----- .../test-parachains/undying/collator/src/main.rs | 6 +----- scripts/ci/gitlab/pipeline/zombienet.yml | 13 ++++++++++++- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5a84bbfeba85..b2d91e61da94 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,12 +33,12 @@ variables: GIT_STRATEGY: fetch GIT_DEPTH: 100 CI_SERVER_NAME: "GitLab CI" - CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29" BUILDAH_COMMAND: "buildah --storage-driver overlay2" DOCKER_OS: "debian:stretch" ARCH: "x86_64" - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.55" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.65" default: cache: {} @@ -233,7 +233,7 @@ include: file: /common/timestamp.yml - project: parity/infrastructure/ci_cd/shared ref: main - file: /common/ci-unified.yml + file: /common/ci-unified.yml #### stage: .post diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs index de1b37b50dab..ac135a2702a5 100644 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -21,7 +21,6 @@ use polkadot_node_primitives::CollationGenerationConfig; use 
polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; -use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_adder_collator::Collator; @@ -54,12 +53,9 @@ fn main() -> Result<()> { ) })?; - runner.run_node_until_exit(|mut config| async move { + runner.run_node_until_exit(|config| async move { let collator = Collator::new(); - // Zombienet is spawning all collators currently with the same CLI, this means it - // sets `--validator` and this is wrong here. - config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { diff --git a/parachain/test-parachains/undying/collator/src/main.rs b/parachain/test-parachains/undying/collator/src/main.rs index 79420dbbc9d5..ac889d7a00e0 100644 --- a/parachain/test-parachains/undying/collator/src/main.rs +++ b/parachain/test-parachains/undying/collator/src/main.rs @@ -21,7 +21,6 @@ use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; -use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_undying_collator::Collator; @@ -54,12 +53,9 @@ fn main() -> Result<()> { ) })?; - runner.run_node_until_exit(|mut config| async move { + runner.run_node_until_exit(|config| async move { let collator = Collator::new(cli.run.pov_size, cli.run.pvf_complexity); - // Zombienet is spawning all collators currently with the same CLI, this means it - // sets `--validator` and this is wrong here. - config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index cc4a7eb2ccc1..d7a12ad0723f 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -12,6 +12,7 @@ zombienet-tests-parachains-smoke-test: - job: publish-malus-image - job: publish-test-collators-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/smoke" before_script: - echo "Zombie-net Tests Config" @@ -42,6 +43,7 @@ zombienet-tests-parachains-pvf: - job: publish-polkadot-debug-image - job: publish-test-collators-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional" before_script: - echo "Zombie-net Tests Config" @@ -73,6 +75,7 @@ zombienet-tests-parachains-disputes: - job: publish-test-collators-image - job: publish-malus-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional" before_script: - echo "Zombie-net Tests Config" @@ -104,6 +107,7 @@ zombienet-tests-parachains-disputes-garbage-candidate: - job: publish-test-collators-image - job: publish-malus-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional" before_script: - echo "Zombie-net Tests Config" @@ -135,6 +139,7 @@ zombienet-tests-parachains-disputes-past-session: - job: publish-test-collators-image - job: publish-malus-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: 
"https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional" before_script: - echo "Zombie-net Tests Config" @@ -167,6 +172,7 @@ zombienet-test-parachains-upgrade-smoke-test: - job: publish-malus-image - job: publish-test-collators-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/smoke" before_script: - echo "ZombieNet Tests Config" @@ -197,6 +203,7 @@ zombienet-tests-misc-paritydb: - job: publish-test-collators-image artifacts: true variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/misc" before_script: - echo "Zombie-net Tests Config" @@ -227,7 +234,8 @@ zombienet-tests-misc-upgrade-node: - job: build-linux-stable artifacts: true variables: - GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/misc" + RUN_IN_CONTAINER: "1" + GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/misc" before_script: - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE_NAME}" @@ -258,6 +266,7 @@ zombienet-tests-malus-dispute-valid: - job: publish-malus-image - job: publish-test-collators-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/node/malus/integrationtests" before_script: - echo "Zombie-net Tests Config" @@ -288,6 +297,7 @@ zombienet-tests-deregister-register-validator: - job: publish-polkadot-debug-image artifacts: true variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/smoke" before_script: - echo "Zombie-net Tests Config" @@ -315,6 +325,7 @@ zombienet-tests-beefy-and-mmr: needs: - job: publish-polkadot-debug-image variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional" before_script: - echo "Zombie-net Tests Config" From d74c60b6489f31f437edb1a19de0d0f6b8ad432e Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 17 Aug 2023 15:57:38 +1000 Subject: [PATCH 24/27] cli: remove deprecated try-runtime subcommand (also companion for #14731) (#7599) * remove try-runtime-cli * fix ci pipeline * fix link * remove chain var * build runtime with try-runtime feature * use main branch * pin to commit * fix build * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 368 +++++++++++++-------------- cli/src/cli.rs | 9 +- cli/src/command.rs | 64 +---- scripts/ci/gitlab/pipeline/check.yml | 6 +- 4 files changed, 194 insertions(+), 253 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50e6128a7f0a..9073a8fb6297 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -778,7 +778,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "hash-db", "log", @@ -2831,7 +2831,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" 
dependencies = [ "parity-scale-codec", ] @@ -2854,7 +2854,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-support-procedural", @@ -2879,7 +2879,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "Inflector", "array-bytes", @@ -2927,7 +2927,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2938,7 +2938,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2955,7 +2955,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -2984,7 +2984,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-recursion", "futures", @@ -3005,7 +3005,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "aquamarine", "bitflags 1.3.2", @@ -3043,7 +3043,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "Inflector", "cfg-expr", @@ -3061,7 +3061,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support-procedural-tools-derive", 
"proc-macro-crate", @@ -3073,7 +3073,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "proc-macro2", "quote", @@ -3083,7 +3083,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3110,7 +3110,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -3123,7 +3123,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "cfg-if", "frame-support", @@ -3142,7 +3142,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -3157,7 +3157,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "sp-api", @@ -3166,7 +3166,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "parity-scale-codec", @@ -3341,7 +3341,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "chrono", "frame-election-provider-support", @@ -5345,7 +5345,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "log", @@ -5364,7 +5364,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "anyhow", "jsonrpsee", @@ -5901,7 +5901,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -5916,7 +5916,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -5932,7 +5932,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -5946,7 +5946,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -5970,7 +5970,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "aquamarine", "docify", @@ -5992,7 +5992,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -6011,7 +6011,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6026,7 +6026,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -6045,7 +6045,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -6069,7 +6069,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6087,7 +6087,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6106,7 +6106,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6123,7 +6123,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6140,7 +6140,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6158,7 +6158,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6181,7 +6181,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6194,7 +6194,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6213,7 +6213,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "docify", "frame-benchmarking", @@ -6232,7 +6232,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6255,7 +6255,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "enumflags2", "frame-benchmarking", @@ -6271,7 +6271,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6291,7 +6291,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6308,7 +6308,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6325,7 +6325,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6344,7 +6344,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6361,7 +6361,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6377,7 +6377,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6393,7 +6393,7 @@ 
dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -6412,7 +6412,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6432,7 +6432,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -6443,7 +6443,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -6460,7 +6460,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6484,7 +6484,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6501,7 +6501,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6516,7 +6516,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6534,7 +6534,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6549,7 +6549,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6568,7 +6568,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "docify", "frame-benchmarking", @@ -6586,7 +6586,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -6607,7 +6607,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6623,7 +6623,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6641,7 +6641,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6664,7 +6664,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6675,7 +6675,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "log", "sp-arithmetic", @@ -6684,7 +6684,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "sp-api", @@ -6693,7 +6693,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6710,7 +6710,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6725,7 +6725,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6743,7 +6743,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6762,7 +6762,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-support", "frame-system", @@ -6778,7 +6778,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6794,7 +6794,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6806,7 +6806,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6823,7 +6823,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6854,7 +6854,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -6869,7 +6869,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-benchmarking", "frame-support", @@ -10031,7 +10031,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "log", "sp-core", @@ -10042,7 +10042,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "futures", @@ -10070,7 +10070,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "futures-timer", @@ -10093,7 +10093,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -10108,7 +10108,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -10127,7 +10127,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10138,7 +10138,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "chrono", @@ -10177,7 +10177,7 @@ dependencies = [ [[package]] name = 
"sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "fnv", "futures", @@ -10203,7 +10203,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "hash-db", "kvdb", @@ -10229,7 +10229,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "futures", @@ -10254,7 +10254,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "fork-tree", @@ -10290,7 +10290,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "jsonrpsee", @@ -10312,7 +10312,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "async-channel", @@ -10346,7 +10346,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "jsonrpsee", @@ -10365,7 +10365,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "fork-tree", "parity-scale-codec", @@ -10378,7 +10378,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "ahash 0.8.3", "array-bytes", @@ -10419,7 +10419,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "finality-grandpa", "futures", @@ -10439,7 +10439,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "futures", @@ -10462,7 +10462,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -10484,7 +10484,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -10496,7 +10496,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "anyhow", "cfg-if", @@ -10513,7 +10513,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "ansi_term", "futures", @@ -10529,7 +10529,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -10543,7 +10543,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "async-channel", @@ -10584,7 +10584,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-channel", "cid", @@ -10604,7 +10604,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -10621,7 +10621,7 @@ dependencies = [ [[package]] name = 
"sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "ahash 0.8.3", "futures", @@ -10639,7 +10639,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "async-channel", @@ -10660,7 +10660,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "async-channel", @@ -10694,7 +10694,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "futures", @@ -10712,7 +10712,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "bytes", @@ -10746,7 +10746,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10755,7 +10755,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "jsonrpsee", @@ -10786,7 +10786,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10805,7 +10805,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "http", "jsonrpsee", @@ -10820,7 +10820,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "futures", @@ -10848,7 +10848,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "directories", @@ -10912,7 +10912,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "log", "parity-scale-codec", @@ -10923,7 +10923,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "clap 4.3.19", "fs4", @@ -10937,7 +10937,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10956,7 +10956,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "libc", @@ -10975,7 +10975,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "chrono", "futures", @@ -10994,7 +10994,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "ansi_term", "atty", @@ -11023,7 +11023,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11034,7 +11034,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "futures", @@ -11060,7 +11060,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "futures", @@ -11076,7 +11076,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-channel", "futures", @@ -11660,7 +11660,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "hash-db", "log", @@ -11681,7 +11681,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "Inflector", "blake2", @@ -11695,7 +11695,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "scale-info", @@ -11708,7 +11708,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "integer-sqrt", "num-traits", @@ -11722,7 +11722,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "scale-info", @@ -11735,7 +11735,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "sp-api", "sp-inherents", @@ -11746,7 +11746,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "log", @@ -11764,7 +11764,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "futures", @@ -11779,7 
+11779,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "parity-scale-codec", @@ -11796,7 +11796,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "parity-scale-codec", @@ -11815,7 +11815,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11834,7 +11834,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "finality-grandpa", "log", @@ -11852,7 +11852,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "scale-info", @@ -11864,7 +11864,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11911,7 +11911,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "blake2b_simd", "byteorder", @@ -11924,7 +11924,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "quote", "sp-core-hashing", @@ -11934,7 +11934,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11943,7 +11943,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source 
= "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "proc-macro2", "quote", @@ -11953,7 +11953,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "environmental", "parity-scale-codec", @@ -11964,7 +11964,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "serde_json", "sp-api", @@ -11975,7 +11975,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11989,7 +11989,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "bytes", "ed25519", @@ -12014,7 +12014,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "lazy_static", "sp-core", @@ -12025,7 +12025,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -12037,7 +12037,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "thiserror", "zstd 0.12.4", @@ -12046,7 +12046,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -12057,7 +12057,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -12075,7 +12075,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = 
"4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "scale-info", @@ -12089,7 +12089,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "sp-api", "sp-core", @@ -12099,7 +12099,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "backtrace", "lazy_static", @@ -12109,7 +12109,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "rustc-hash", "serde", @@ -12119,7 +12119,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "either", "hash256-std-hasher", @@ -12141,7 +12141,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -12159,7 +12159,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "Inflector", "proc-macro-crate", @@ -12171,7 +12171,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "scale-info", @@ -12186,7 +12186,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -12200,7 +12200,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" 
dependencies = [ "hash-db", "log", @@ -12221,7 +12221,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "aes-gcm 0.10.2", "curve25519-dalek 3.2.0", @@ -12245,12 +12245,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12263,7 +12263,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "parity-scale-codec", @@ -12276,7 +12276,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "sp-std", @@ -12288,7 +12288,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "sp-api", "sp-runtime", @@ -12297,7 +12297,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "parity-scale-codec", @@ -12312,7 +12312,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "ahash 0.8.3", "hash-db", @@ -12335,7 +12335,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12352,7 +12352,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -12363,7 +12363,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -12376,7 +12376,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "parity-scale-codec", "scale-info", @@ -12617,12 +12617,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -12641,7 +12641,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "hyper", "log", @@ -12653,7 +12653,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "jsonrpsee", @@ -12666,7 +12666,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -12683,7 +12683,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "array-bytes", "async-trait", @@ -12709,7 +12709,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "futures", "substrate-test-utils-derive", @@ -12719,7 +12719,7 @@ dependencies = [ [[package]] name = 
"substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12730,7 +12730,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "ansi_term", "build-helper", @@ -13638,7 +13638,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#e53cf32cce1f1f9416e09183b2b0dbdb0a50367d" +source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" dependencies = [ "async-trait", "clap 4.3.19", diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 696d381962b6..66205902b79d 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -55,12 +55,9 @@ pub enum Subcommand { /// capabilities of running a validator. HostPerfCheck, - /// Try some command against runtime state. - #[cfg(feature = "try-runtime")] - TryRuntime(try_runtime_cli::TryRuntimeCmd), - - /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. - #[cfg(not(feature = "try-runtime"))] + /// Try-runtime has migrated to a standalone CLI + /// (). The subcommand exists as a stub and + /// deprecation notice. It will be removed entirely some time after Janurary 2024. TryRuntime, /// Key management CLI utilities diff --git a/cli/src/command.rs b/cli/src/command.rs index dd76ed558695..0fbeafb99a07 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -531,68 +531,12 @@ pub fn run() -> Result<()> { }, Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?), #[cfg(feature = "try-runtime")] - Some(Subcommand::TryRuntime(cmd)) => { - use sc_service::TaskManager; - use try_runtime_cli::block_building_info::timestamp_with_babe_info; - - let runner = cli.create_runner(cmd)?; - let chain_spec = &runner.config().chain_spec; - set_default_ss58_version(chain_spec); - - let registry = &runner.config().prometheus_config.as_ref().map(|cfg| &cfg.registry); - let task_manager = TaskManager::new(runner.config().tokio_handle.clone(), *registry) - .map_err(|e| Error::SubstrateService(sc_service::Error::Prometheus(e)))?; - - ensure_dev(chain_spec).map_err(Error::Other)?; - - #[cfg(feature = "kusama-native")] - if chain_spec.is_kusama() { - return runner.async_run(|_| { - Ok(( - cmd.run::( - Some(timestamp_with_babe_info(service::kusama_runtime_constants::time::MILLISECS_PER_BLOCK)) - ) - .map_err(Error::SubstrateCli), - task_manager, - )) - }) - } - - #[cfg(feature = "westend-native")] - if chain_spec.is_westend() { - return runner.async_run(|_| { - Ok(( - cmd.run::( - Some(timestamp_with_babe_info(service::westend_runtime_constants::time::MILLISECS_PER_BLOCK)) - ) - .map_err(Error::SubstrateCli), - task_manager, - )) - }) - } - // else we assume it is polkadot. 
- #[cfg(feature = "polkadot-native")] - { - return runner.async_run(|_| { - Ok(( - cmd.run::( - Some(timestamp_with_babe_info(service::polkadot_runtime_constants::time::MILLISECS_PER_BLOCK)) - ) - .map_err(Error::SubstrateCli), - task_manager, - )) - }) - } - #[cfg(not(feature = "polkadot-native"))] - panic!("No runtime feature (polkadot, kusama, westend, rococo) is enabled") - }, + Some(Subcommand::TryRuntime) => Err(try_runtime_cli::DEPRECATION_NOTICE.to_owned().into()), #[cfg(not(feature = "try-runtime"))] - Some(Subcommand::TryRuntime) => Err(Error::Other( - "TryRuntime wasn't enabled when building the node. \ + Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \ You can enable it with `--features try-runtime`." - .into(), - ) - .into()), + .to_owned() + .into()), Some(Subcommand::ChainInfo(cmd)) => { let runner = cli.create_runner(cmd)?; Ok(runner.sync_run(|config| cmd.run::(&config))?) diff --git a/scripts/ci/gitlab/pipeline/check.yml b/scripts/ci/gitlab/pipeline/check.yml index a9076c9318cd..9b2ad5e73833 100644 --- a/scripts/ci/gitlab/pipeline/check.yml +++ b/scripts/ci/gitlab/pipeline/check.yml @@ -62,10 +62,10 @@ check-try-runtime: - | export RUST_LOG=remote-ext=debug,runtime=debug echo "---------- Running try-runtime for ${NETWORK} ----------" - time cargo build --release --locked -p "$NETWORK"-runtime - time cargo run --locked --release --features try-runtime try-runtime \ + time cargo install --locked --git https://github.com/paritytech/try-runtime-cli --rev a93c9b5abe5d31a4cf1936204f7e5c489184b521 + time cargo build --release --locked -p "$NETWORK"-runtime --features try-runtime + time try-runtime \ --runtime ./target/release/wbuild/"$NETWORK"-runtime/target/wasm32-unknown-unknown/release/"$NETWORK"_runtime.wasm \ - --chain=${NETWORK}-dev \ on-runtime-upgrade --checks=pre-and-post live --uri wss://${NETWORK}-try-runtime-node.parity-chains.parity.io:443 check-runtime-migration-polkadot: From eb7078cc9917f5696313ffb2b989572f2e5f4ec6 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Thu, 17 Aug 2023 13:06:39 +0200 Subject: [PATCH 25/27] Polkadot gets topic-based message IDs (#7301) * Polkadot gets topics * Formatting * Fixes --------- Co-authored-by: Keith Yeung --- runtime/polkadot/src/xcm_config.rs | 12 ++++++------ xcm/src/v3/junction.rs | 1 + 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/runtime/polkadot/src/xcm_config.rs b/runtime/polkadot/src/xcm_config.rs index faae2e1d2619..b2e09b1453af 100644 --- a/runtime/polkadot/src/xcm_config.rs +++ b/runtime/polkadot/src/xcm_config.rs @@ -43,8 +43,8 @@ use xcm_builder::{ AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, BackingToPlurality, ChildParachainAsNative, ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, UsingComponents, - WeightInfoBounds, WithComputedOrigin, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::traits::WithOriginFilter; @@ -120,14 +120,14 @@ parameter_types! { /// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our /// individual routers. -pub type XcmRouter = ( +pub type XcmRouter = WithUniqueTopic<( // Only one router so far - use DMP to communicate with child parachains. 
ChildParachainRouter< Runtime, XcmPallet, ExponentialPrice, >, -); +)>; parameter_types! { pub const Dot: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) }); @@ -153,7 +153,7 @@ match_types! { } /// The barriers one of which must be passed for an XCM message to be executed. -pub type Barrier = ( +pub type Barrier = TrailingSetTopicAsId<( // Weight that is paid for may be consumed. TakeWeightCredit, // Expected responses are OK. @@ -170,7 +170,7 @@ pub type Barrier = ( UniversalLocation, ConstU32<8>, >, -); +)>; /// A call filter for the XCM Transact instruction. This is a temporary measure until we /// properly account for proof size weights. diff --git a/xcm/src/v3/junction.rs b/xcm/src/v3/junction.rs index ae66e2b33364..b5dd5bc7c88f 100644 --- a/xcm/src/v3/junction.rs +++ b/xcm/src/v3/junction.rs @@ -288,6 +288,7 @@ pub enum Junction { /// An instanced, indexed pallet that forms a constituent part of the context. /// /// Generally used when the context is a Frame-based chain. + // TODO XCMv4 inner should be `Compact`. PalletInstance(u8), /// A non-descript index within the context location. /// From 046c43b97e46bc843fab277a497504275a5f5407 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 17 Aug 2023 13:26:44 +0200 Subject: [PATCH 26/27] Companion for Substrate#14612 (#7536) * Companion for reworking the storage transaction cache * Cargo lock * Fixes * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> --- Cargo.lock | 372 +++++++++--------- node/service/Cargo.toml | 1 - node/service/src/lib.rs | 14 +- utils/staking-miner/Cargo.toml | 2 +- utils/staking-miner/src/dry_run.rs | 2 +- utils/staking-miner/src/emergency_solution.rs | 2 +- utils/staking-miner/src/main.rs | 11 +- utils/staking-miner/src/monitor.rs | 2 +- utils/staking-miner/src/prelude.rs | 4 +- 9 files changed, 200 insertions(+), 210 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9073a8fb6297..0cf0541fa5f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -778,7 +778,7 @@ dependencies = [ [[package]] name = "binary-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "hash-db", "log", @@ -2831,7 +2831,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", ] @@ -2854,7 +2854,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-support-procedural", @@ -2879,7 +2879,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = 
"git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "Inflector", "array-bytes", @@ -2927,7 +2927,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2938,7 +2938,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-election-provider-solution-type", "frame-support", @@ -2955,7 +2955,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -2984,7 +2984,7 @@ dependencies = [ [[package]] name = "frame-remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-recursion", "futures", @@ -2996,6 +2996,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", + "sp-state-machine", "spinners", "substrate-rpc-client", "tokio", @@ -3005,7 +3006,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "aquamarine", "bitflags 1.3.2", @@ -3043,7 +3044,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "Inflector", "cfg-expr", @@ -3061,7 +3062,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -3073,7 +3074,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "proc-macro2", "quote", @@ -3083,7 +3084,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3110,7 +3111,7 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -3123,7 +3124,7 @@ dependencies = [ [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "cfg-if", "frame-support", @@ -3142,7 +3143,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -3157,7 +3158,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "sp-api", @@ -3166,7 +3167,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "parity-scale-codec", @@ -3341,7 +3342,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "chrono", "frame-election-provider-support", @@ -5345,7 +5346,7 @@ dependencies = [ [[package]] name = "mmr-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "log", @@ -5364,7 +5365,7 @@ dependencies = [ [[package]] name = "mmr-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "anyhow", "jsonrpsee", @@ -5901,7 +5902,7 @@ dependencies = [ [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = 
"git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5916,7 +5917,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -5932,7 +5933,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -5946,7 +5947,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -5970,7 +5971,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "aquamarine", "docify", @@ -5992,7 +5993,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-election-provider-support", "frame-remote-externalities", @@ -6011,7 +6012,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6026,7 +6027,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -6045,7 +6046,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "binary-merkle-tree", @@ -6069,7 +6070,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", 
@@ -6087,7 +6088,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6106,7 +6107,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6123,7 +6124,7 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6140,7 +6141,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6158,7 +6159,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6181,7 +6182,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6194,7 +6195,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6213,7 +6214,7 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "docify", "frame-benchmarking", @@ -6232,7 +6233,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6255,7 +6256,7 @@ dependencies = [ [[package]] name = 
"pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "enumflags2", "frame-benchmarking", @@ -6271,7 +6272,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6291,7 +6292,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6308,7 +6309,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6325,7 +6326,7 @@ dependencies = [ [[package]] name = "pallet-message-queue" version = "7.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6344,7 +6345,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6361,7 +6362,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6377,7 +6378,7 @@ dependencies = [ [[package]] name = "pallet-nis" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6393,7 +6394,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -6412,7 +6413,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = 
"git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6432,7 +6433,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "pallet-nomination-pools", "parity-scale-codec", @@ -6443,7 +6444,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -6460,7 +6461,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6484,7 +6485,7 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6501,7 +6502,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6516,7 +6517,7 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6534,7 +6535,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6549,7 +6550,7 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "assert_matches", "frame-benchmarking", @@ -6568,7 +6569,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = 
"git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "docify", "frame-benchmarking", @@ -6586,7 +6587,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -6607,7 +6608,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6623,7 +6624,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6641,7 +6642,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -6664,7 +6665,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6675,7 +6676,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "log", "sp-arithmetic", @@ -6684,7 +6685,7 @@ dependencies = [ [[package]] name = "pallet-staking-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "sp-api", @@ -6693,7 +6694,7 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6710,7 +6711,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", 
"frame-support", @@ -6725,7 +6726,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6743,7 +6744,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6762,7 +6763,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-support", "frame-system", @@ -6778,7 +6779,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -6794,7 +6795,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -6806,7 +6807,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6823,7 +6824,7 @@ dependencies = [ [[package]] name = "pallet-uniques" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6838,7 +6839,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6854,7 +6855,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -6869,7 +6870,7 @@ dependencies = [ [[package]] name = "pallet-whitelist" 
version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-benchmarking", "frame-support", @@ -8590,7 +8591,6 @@ dependencies = [ "sp-storage", "sp-timestamp", "sp-transaction-pool", - "sp-trie", "sp-version", "sp-weights", "substrate-prometheus-endpoint", @@ -10031,7 +10031,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "log", "sp-core", @@ -10042,7 +10042,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "futures", @@ -10070,7 +10070,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "futures-timer", @@ -10093,7 +10093,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -10108,7 +10108,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -10127,7 +10127,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10138,7 +10138,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "chrono", @@ -10177,7 +10177,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "fnv", "futures", @@ -10203,7 +10203,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "hash-db", "kvdb", @@ -10229,7 +10229,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "futures", @@ -10254,7 +10254,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "fork-tree", @@ -10290,7 +10290,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "jsonrpsee", @@ -10312,7 +10312,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "async-channel", @@ -10346,7 +10346,7 @@ dependencies = [ [[package]] name = "sc-consensus-beefy-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "jsonrpsee", @@ -10365,7 +10365,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "fork-tree", "parity-scale-codec", @@ -10378,7 +10378,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "ahash 0.8.3", "array-bytes", @@ -10419,7 +10419,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "finality-grandpa", "futures", @@ -10439,7 +10439,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" 
dependencies = [ "async-trait", "futures", @@ -10462,7 +10462,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -10484,7 +10484,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -10496,7 +10496,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "anyhow", "cfg-if", @@ -10513,7 +10513,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "ansi_term", "futures", @@ -10529,7 +10529,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -10543,7 +10543,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "async-channel", @@ -10584,7 +10584,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-channel", "cid", @@ -10604,7 +10604,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -10621,7 +10621,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "ahash 0.8.3", "futures", @@ -10639,7 +10639,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "async-channel", @@ -10660,7 +10660,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "async-channel", @@ -10694,7 +10694,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "futures", @@ -10712,7 +10712,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "bytes", @@ -10746,7 +10746,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -10755,7 +10755,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "jsonrpsee", @@ -10786,7 +10786,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10805,7 +10805,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "http", "jsonrpsee", @@ -10820,7 +10820,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "futures", @@ -10848,7 +10848,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", 
"directories", @@ -10912,7 +10912,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "log", "parity-scale-codec", @@ -10923,7 +10923,7 @@ dependencies = [ [[package]] name = "sc-storage-monitor" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "clap 4.3.19", "fs4", @@ -10937,7 +10937,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -10956,7 +10956,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "libc", @@ -10975,7 +10975,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "chrono", "futures", @@ -10994,7 +10994,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "ansi_term", "atty", @@ -11023,7 +11023,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -11034,7 +11034,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "futures", @@ -11060,7 +11060,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "futures", @@ -11076,7 +11076,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = 
"git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-channel", "futures", @@ -11660,7 +11660,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "hash-db", "log", @@ -11681,7 +11681,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "Inflector", "blake2", @@ -11695,7 +11695,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "scale-info", @@ -11708,7 +11708,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "integer-sqrt", "num-traits", @@ -11722,7 +11722,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "scale-info", @@ -11735,7 +11735,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "sp-api", "sp-inherents", @@ -11746,7 +11746,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "log", @@ -11764,7 +11764,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "futures", @@ -11779,7 +11779,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "parity-scale-codec", @@ -11796,7 +11796,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" 
-source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "parity-scale-codec", @@ -11815,7 +11815,7 @@ dependencies = [ [[package]] name = "sp-consensus-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "lazy_static", "parity-scale-codec", @@ -11834,7 +11834,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "finality-grandpa", "log", @@ -11852,7 +11852,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "scale-info", @@ -11864,7 +11864,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "arrayvec 0.7.4", @@ -11911,7 +11911,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "blake2b_simd", "byteorder", @@ -11924,7 +11924,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "quote", "sp-core-hashing", @@ -11934,7 +11934,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -11943,7 +11943,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "proc-macro2", "quote", @@ -11953,7 +11953,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" 
dependencies = [ "environmental", "parity-scale-codec", @@ -11964,7 +11964,7 @@ dependencies = [ [[package]] name = "sp-genesis-builder" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "serde_json", "sp-api", @@ -11975,7 +11975,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -11989,7 +11989,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "bytes", "ed25519", @@ -12014,7 +12014,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "lazy_static", "sp-core", @@ -12025,7 +12025,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -12037,7 +12037,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "thiserror", "zstd 0.12.4", @@ -12046,7 +12046,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -12057,7 +12057,7 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "ckb-merkle-mountain-range", "log", @@ -12075,7 +12075,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "scale-info", @@ -12089,7 +12089,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "sp-api", "sp-core", @@ -12099,7 +12099,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "backtrace", "lazy_static", @@ -12109,7 +12109,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "rustc-hash", "serde", @@ -12119,7 +12119,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "either", "hash256-std-hasher", @@ -12141,7 +12141,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -12159,7 +12159,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "Inflector", "proc-macro-crate", @@ -12171,7 +12171,7 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "scale-info", @@ -12186,7 +12186,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -12200,7 +12200,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "hash-db", "log", @@ -12221,7 +12221,7 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "aes-gcm 0.10.2", 
"curve25519-dalek 3.2.0", @@ -12245,12 +12245,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12263,7 +12263,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "parity-scale-codec", @@ -12276,7 +12276,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "sp-std", @@ -12288,7 +12288,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "sp-api", "sp-runtime", @@ -12297,7 +12297,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "parity-scale-codec", @@ -12312,7 +12312,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "ahash 0.8.3", "hash-db", @@ -12335,7 +12335,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "impl-serde", "parity-scale-codec", @@ -12352,7 +12352,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "proc-macro2", @@ -12363,7 +12363,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = 
"git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -12376,7 +12376,7 @@ dependencies = [ [[package]] name = "sp-weights" version = "20.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "parity-scale-codec", "scale-info", @@ -12476,9 +12476,9 @@ dependencies = [ "signal-hook", "signal-hook-tokio", "sp-core", - "sp-io", "sp-npos-elections", "sp-runtime", + "sp-state-machine", "sp-version", "sub-tokens", "thiserror", @@ -12617,12 +12617,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -12641,7 +12641,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "hyper", "log", @@ -12653,7 +12653,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "jsonrpsee", @@ -12666,7 +12666,7 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -12683,7 +12683,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "array-bytes", "async-trait", @@ -12709,7 +12709,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "futures", "substrate-test-utils-derive", @@ -12719,7 +12719,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = 
"git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -12730,7 +12730,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "ansi_term", "build-helper", @@ -13638,7 +13638,7 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#aa70241b841b96a3cfd3580a3d378a6d3d01b1ec" +source = "git+https://github.com/paritytech/substrate?branch=master#ecd503d49029236cc43d6de8c5068188c0fc1ef8" dependencies = [ "async-trait", "clap 4.3.19", diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 48e4633aa5e3..8cd9e4434bbd 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -53,7 +53,6 @@ sp-session = { git = "https://github.com/paritytech/substrate", branch = "master sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 3a850c46279a..2e46bf7329e2 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -56,7 +56,6 @@ use { sc_client_api::BlockBackend, sc_transaction_pool_api::OffchainTransactionPoolFactory, sp_core::traits::SpawnNamed, - sp_trie::PrefixedMemoryDB, }; use polkadot_node_subsystem_util::database::Database; @@ -475,7 +474,7 @@ fn new_partial( FullClient, FullBackend, ChainSelection, - sc_consensus::DefaultImportQueue, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( impl Fn( @@ -1321,15 +1320,8 @@ macro_rules! 
chain_ops { pub fn new_chain_ops( config: &mut Configuration, jaeger_agent: Option, -) -> Result< - ( - Arc, - Arc, - sc_consensus::BasicQueue>, - TaskManager, - ), - Error, -> { +) -> Result<(Arc, Arc, sc_consensus::BasicQueue, TaskManager), Error> +{ config.keystore = service::config::KeystoreConfig::InMemory; if config.chain_spec.is_rococo() || diff --git a/utils/staking-miner/Cargo.toml b/utils/staking-miner/Cargo.toml index 05b0ddefaee9..f63425bb74e4 100644 --- a/utils/staking-miner/Cargo.toml +++ b/utils/staking-miner/Cargo.toml @@ -25,7 +25,7 @@ remote-externalities = { git = "https://github.com/paritytech/substrate", branch signal-hook-tokio = { version = "0.3", features = ["futures-v0_3"] } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-npos-elections = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/utils/staking-miner/src/dry_run.rs b/utils/staking-miner/src/dry_run.rs index 4d09306a6ef5..7e46f630a1f5 100644 --- a/utils/staking-miner/src/dry_run.rs +++ b/utils/staking-miner/src/dry_run.rs @@ -120,7 +120,7 @@ macro_rules! dry_run_cmd_for { ($runtime:ident) => { paste::paste! { } else { Default::default() }; - let mut ext = crate::create_election_ext::(rpc.clone(), config.at, pallets).await?; + let mut ext = crate::create_election_ext::(rpc.clone(), config.at, pallets).await?; if config.force_snapshot { force_create_snapshot::(&mut ext)?; }; diff --git a/utils/staking-miner/src/emergency_solution.rs b/utils/staking-miner/src/emergency_solution.rs index 85487f7e40df..9ea9f90756e2 100644 --- a/utils/staking-miner/src/emergency_solution.rs +++ b/utils/staking-miner/src/emergency_solution.rs @@ -28,7 +28,7 @@ macro_rules! emergency_solution_cmd_for { ($runtime:ident) => { paste::paste! { ) -> Result<(), Error<$crate::[<$runtime _runtime_exports>]::Runtime>> { use $crate::[<$runtime _runtime_exports>]::*; - let mut ext = crate::create_election_ext::(client, config.at, vec![]).await?; + let mut ext = crate::create_election_ext::(client, config.at, vec![]).await?; let raw_solution = crate::mine_with::(&config.solver, &mut ext, false)?; ext.execute_with(|| { diff --git a/utils/staking-miner/src/main.rs b/utils/staking-miner/src/main.rs index a4c496998ed9..90b2c7366a1b 100644 --- a/utils/staking-miner/src/main.rs +++ b/utils/staking-miner/src/main.rs @@ -56,7 +56,6 @@ use runtime_versions::RuntimeVersions; use signal_hook::consts::signal::*; use signal_hook_tokio::Signals; use sp_npos_elections::BalancingConfig; -use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; use std::{ops::Deref, sync::Arc, time::Duration}; use tracing_subscriber::{fmt, EnvFilter}; @@ -295,15 +294,13 @@ frame_support::parameter_types! { /// Build the Ext at hash with all the data of `ElectionProviderMultiPhase` and any additional /// pallets. 
-async fn create_election_ext( +async fn create_election_ext( client: SharedRpcClient, - at: Option, + at: Option, additional: Vec, ) -> Result> where T: EPM::Config, - B: BlockT + DeserializeOwned, - B::Header: DeserializeOwned, { use frame_support::{storage::generator::StorageMap, traits::PalletInfo}; use sp_core::hashing::twox_128; @@ -312,7 +309,7 @@ where .expect("Pallet always has name; qed.") .to_string()]; pallets.extend(additional); - Builder::::new() + Builder::::new() .mode(Mode::Online(OnlineConfig { transport: Transport::Uri(client.uri().to_owned()), at, @@ -323,7 +320,7 @@ where })) .build() .await - .map_err(|why| Error::RemoteExternalities(why)) + .map_err(|why| Error::::RemoteExternalities(why)) .map(|rx| rx.inner_ext) } diff --git a/utils/staking-miner/src/monitor.rs b/utils/staking-miner/src/monitor.rs index e578e1c83544..607ecb6baa42 100644 --- a/utils/staking-miner/src/monitor.rs +++ b/utils/staking-miner/src/monitor.rs @@ -259,7 +259,7 @@ macro_rules! monitor_cmd_for { ($runtime:tt) => { paste::paste! { let _lock = submit_lock.lock().await; - let mut ext = match crate::create_election_ext::(rpc.clone(), Some(hash), vec![]).await { + let mut ext = match crate::create_election_ext::(rpc.clone(), Some(hash), vec![]).await { Ok(ext) => ext, Err(err) => { log::debug!(target: LOG_TARGET, "Skipping block {}; {}", at.number, err); diff --git a/utils/staking-miner/src/prelude.rs b/utils/staking-miner/src/prelude.rs index db029de881c9..fb701ece2384 100644 --- a/utils/staking-miner/src/prelude.rs +++ b/utils/staking-miner/src/prelude.rs @@ -32,6 +32,8 @@ pub type Nonce = core_primitives::Nonce; pub type Hash = core_primitives::Hash; /// The header type. We re-export it here, but we can easily get it from block as well. pub type Header = core_primitives::Header; +/// The block type. +pub type Block = core_primitives::Block; pub use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -44,7 +46,7 @@ pub const LOG_TARGET: &str = "staking-miner"; pub use pallet_election_provider_multi_phase as EPM; /// The externalities type. -pub type Ext = sp_io::TestExternalities; +pub type Ext = sp_state_machine::TestExternalities>; /// The key pair type being used. We "strongly" assume sr25519 for simplicity. pub type Pair = sp_core::sr25519::Pair; From 56d45fe3f34b895e6f94b05d6fea278497527cf5 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 17 Aug 2023 14:52:23 +0200 Subject: [PATCH 27/27] Parathreads Feature Branch (#6969) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * First baby steps * Split scheduler into several modules * Towards a more modular approach for scheduling * move free_cores; IntoInterator -> BTreeMap * Move clear() * Move more functions out of scheduler * Change weight composition * More abstraction * Further refactor * clippy * fmt * fix test-runtime * Add parathreads pallet to construct_runtime! * Make all runtimes use (Parachains, Parathreads) scheduling * Delete commented out code * Remove parathreads scheduler from westend, rococo, and kusama * fix rococo, westend, and kusama config * Revert "fix rococo, westend, and kusama config" This reverts commit 3ef951d7d3b8f171ce3a2a42c948ac6742772bee. * Revert "Remove parathreads scheduler from westend, rococo, and kusama" This reverts commit 664bafab512f29546c72ba5ceb4255e41298dc3b. 
* Remove CoreIndex from free_cores * Remove unnecessary struct for parathreads * parathreads provider take 1 * Comment out parathread tests * Pop into lookahead * fmt * Fill lookahead with two entries for parachains * fmt * Current stage * Towards ab parathreads * no AB use * Make tests typecheck * quick hack to set scheduling lookahead to 1 * Fix scheduler tests * fix paras_inherent tests * misc * Update more of a test * cfg(test) * some cleanup * Undo paras_inherent changes * Adjust paras inherent tests * Undo changes to v2 primitives * Undo v2 mod changes to tests * minor * Remove parathreads assigner and pallet * minor * minor * more cleanup * fmt * minor * minor * minor * Remove on_new_session from assignment provider * Make adder collator integration test pass * disable failing unit tests * minor * minor * re-enable one unit test * minor * handle retries, add concluded para to pop interface * comment out unused code * Remove core_para from interface * Remove first claimqueue element on clear if None instead removing all Nones * Move claimqueue get out of loop * Use VecDeque instead of Ved in ClaimQueue * Make occupied() AB ready(?) * handle freed disputed in clear_and_fill_claimqueue * clear_and_fill_claimqueue returns scheduled Vec * Rename and minor refactor * return position of assignment taken from claimqueue * minor * Fix session boundary parachains number change + extended test * Fix runtimes * Fix polkadot runtime * Remove polkadot pallet from benchmarks * fix test runtime * Add storage migration * Minor refactor * Minor * migratin typechecks * Add migration to runtimes * Towards modular scheduling II (#6568) * Add post migration check * pebkac * Disable migrations but mine * Revert "Disable migrations but mine" This reverts commit 4fa5c5a370c199944a7e0926f50b08626bfbad4c. * Move scheduler migration * Revert "Move scheduler migration" This reverts commit a16b1659a907950bae048a9f7010f2aa76e02b6d. * Fix migration * cleanup * Don't lose retries value anymore * comment out test function * Remove retries value from Assignment again * minor * Make collator for parathreads optional * data type refactor * update scheduler tests * Change test function cfg * comment out test function * Try cfg(test) only * fix cfg flags * Add get_max_retries function to provider interface (#7047) * Fix merge commit * pebkac * fix merge * update cargo.lock * fix merge * fix merge * Use btreemap instead of vec, fix scheduler calls. 
* Use imported `ScheduledCore` * Remove unused import in inclusion tests * Use keys() instead of mapping over a BTreeMap * Fix migrations for parachains scheduler * Use BlockNumberFor everywhere in scheduler * Add on demand assignment provider pallet (#7110) * Address some PR comments * minor * more cleanup * find_map and timeout availability fixes * Change default scheduling_lookahead to 1 * Add on demand assignment provider pallet * Move test-runtime to new assignment provider * Run cargo format on scheduler tests * minor * Mutate cores in single loop * timeout predicate simplification * claimqueue desired size fix * Replace expect by ok_or * More improvements * Fix push back order and next_up_on_timeout * minor * session change docs * Add pre_new_session call to hand pre session updates * Remove sc_network dependency and PeerId from unnecessary data structures * Remove unnecessary peer_ids * Add OnDemandOrdering proxy (#7156) * Add OnDemandBidding proxy * Fix names * OnDemandAssigner for rococo only * Check PeerId in collator protocol before fetching collation * On occupied, remove non occupied cores from the claimqueue front and refill * Add missing docs * Comment out unused field * fix ScheduledCore in tests * Fix the fix * pebkac * fmt * Fix occupied dropping * Remove double import * ScheduledCore fixes * Readd sc-network dep * pebkac * OpaquePeerId -> PeerId in can_collate interface * Cargo.lock update for interface change * Remove checks not needed anymore? * Drop occupied core on session change if it would time out after the new session * Add on demand assignment provider pallet * Move test-runtime to new assignment provider * Run cargo format on scheduler tests * Add OnDemandOrdering proxy (#7156) * Add OnDemandBidding proxy * Fix names * OnDemandAssigner for rococo only * Remove unneeded config values * Update comments * Use and_then for queue position * Return the max size of the spot queue on error * Add comments to add_parathread_entry * Add module comments * Add log for when can_collate fails * Change assigner queue type to `Assignment` * Update assignment provider tests * More logs * Remove unused keyring import * disable can_collate * comment out can_collate * Can collate first checks set if empty * Move can_collate call to collation advertisement * Fix backing test * map to loop * Remove obsolete check * Move invalid collation test from backing to collator-protocol * fix unused imports * fix test * fix Debug derivation * Increase time limit on zombienet predicates * Increase zombienet timeout * Minor * Address some PR comments * Address PR comments * Comment out failing assert due to on-demand assigner missing * remove collator_restrictions info from backing * Move can_collate to ActiveParas * minor * minor * Update weight information for on demand config * Add ttl to parasentry * Fix tests missing parasentry ttl * Adjust scheduler tests to use ttl default values * Use match instead of if let for ttl drop * Use RuntimeDebug trait for `ParasEntry` fields * Add comments to on demand assignment pallet * Fix spot traffic calculation * Revert runtimedebug changes to primitives * Remove runtimedebug derivation from `ParasEntry` * Mention affinity in pallet level docs * Use RuntimeDebug trait for ParasEntry child types * Remove collator restrictions * Fix primitive versioning and other merge issues * Fix tests post merge * Fix node side tests * Edit parascheduler migration for clarity * Move parascheduler migration up to next release * Remove vestiges from merge * Fix tests * 
Refactor ttl handling * Remove unused things from scheduler tests * Move on demand assigner to own directory * Update documentation * Remove unused sc-network dependency in primitives Was used for collator restrictions * Remove unused import * Reenable scheduler test * Remove unused storage value * Enable timeout predicate test and fix fn Turns out that the issue with the compiler is fixed and we can now use impl Trait in the manner used here. * Remove unused imports * Add benchmarking entry for perbill in config * Correct typo * Address review comments * Log out errors when calculating spot traffic. * Change parascheduler's log target name * Update scheduler_common documentation * Use mutate for affinity fns, add tests * Add another on demand affinity test * Unify parathreads and parachains in HostConfig (take 2) (#7452) * Unify parathreads and parachains in HostConfig * Fixed missed occurences * Remove commented out lines * `HostConfiguration v7` * Fix version check * Add `MigrateToV7` to `Unreleased` * fmt * fmt * Fix compilation errors after the rebase * Update runtime/parachains/src/scheduler/tests.rs Co-authored-by: Anton Vilhelm Ásgeirsson * Update runtime/parachains/src/scheduler/tests.rs Co-authored-by: Anton Vilhelm Ásgeirsson * fmt * Fix migration test * Fix tests * Remove unneeded assert from tests * parathread_cores -> on_demand_cores; parathread_retries -> on_demand_retries * Fix a compilation error in tests * Remove unused `use` * update colander image version --------- Co-authored-by: alexgparity Co-authored-by: Anton Vilhelm Ásgeirsson Co-authored-by: Javier Viola * Fix branch after merge with master * Refactor out duplicate checks into a helper fn * Fix tests post merge * Rename add_parathread_assignment, add test * Update docs * Remove unused on_finalize function * Add weight info to on demand pallet * Update runtime/parachains/src/configuration.rs Co-authored-by: Tsvetomir Dimitrov * Update runtime/parachains/src/scheduler_common/mod.rs Co-authored-by: Tsvetomir Dimitrov * Update runtime/parachains/src/assigner_on_demand/mod.rs Co-authored-by: Tsvetomir Dimitrov * Add benchmarking to on demand pallet * Make place_order test check for success * Add on demand benchmarks * Add local test weights to rococo runtime * Modify TTL drop behaviour to not skip claims Previous behaviour would jump a new claim from the assignment provider ahead in the claimqueue, assuming lookahead is larger than 1. * Refactor ttl test to test claimqueue order * Disable place_order ext. when no on_demand cores * Use default genesis config for benchmark tests * Refactor config builder param * Move lifecycle test from scheduler to on demand * Remove unneeded lifecycle test Paras module via the parachain assignment provider doesn't provide new assignments if a parachain loses it's lease. The on demand assignment provider doesn't provide an assignment that is not a parathread. * Re enable validator shuffle test * More realistic weights for place_order * Remove redundant import * Fix backwards compatibility (hopefully) * ".git/.scripts/commands/bench/bench.sh" --subcommand=runtime --runtime=rococo --target_dir=polkadot --pallet=runtime_parachains::assigner_on_demand * Fix tests. * Fix off-by-one. * Re enable claimqueue fills test * Re enable schedule_rotates_groups test * Fix fill_claimqueue_fills test * Re enable next_up_on_timeout test, move fn * Do not pop from assignment provider when retrying * Fix tests missing collator in scheduledcore * Add comment about timeout predicate. 
* Rename parasentry retries to availability timeouts * Re enable schedule_schedules... test * Refactor prune retried test to new scheduler * Have all scheduler tests use genesis_cfg fn * Update docs * Update copyright notices on new files * Rename is_parachain_core to is_bulk_core * Remove erroneous TODO * Simplify import * ".git/.scripts/commands/bench/bench.sh" --subcommand=runtime --runtime=rococo --target_dir=polkadot --pallet=runtime_parachains::configuration * Revert AdvertiseCollation order shuffle * Refactor place_order into keepalive and allowdeath * Revert rename of hrmp max inbound channels parachain encompasses both on demand and slot auction / bulk. * Restore availability_timeout_predicate function * Clean up leftover comments * Update runtime/parachains/src/scheduler/tests.rs Co-authored-by: Tsvetomir Dimitrov * ".git/.scripts/commands/bench/bench.sh" --subcommand=runtime --runtime=westend --target_dir=polkadot --pallet=runtime_parachains::configuration --------- Co-authored-by: alexgparity Co-authored-by: alexgparity <115470171+alexgparity@users.noreply.github.com> Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Javier Viola Co-authored-by: eskimor Co-authored-by: command-bot <> * On Demand - update weights and small nits (#7605) * Remove collator restriction test in inclusion On demand parachains won't have collator restrictions implemented in this way but will instead use a preferred collator registered to a `ParaId` in `paras_registrar`. * Remove redundant config guard for test fns * Update weights * Update WeightInfo for on_demand assigner * Unify assignment provider parameters into one call (#7606) * Combine assignmentprovider params into one fn call * Move scheduler_common to a module under scheduler * Fix ttl handling in benchmark builder * Run cargo format * Remove obsolete test. * Small improvement. * Use same migration pattern as config module * Remove old TODO * Change log target name for assigner on demand * Fix migration * Fix clippy warnings * Add HostConfiguration storage migration to V8 * Add `MigrateToV8` to unreleased migrations for all runtimes * Fix storage version check for config v8 * Set `StorageVersion` to 8 in `MigrateToV8` * Remove dups. 
* Update primitives/src/v5/mod.rs Co-authored-by: Bastian Köcher --------- Co-authored-by: alexgparity Co-authored-by: alexgparity <115470171+alexgparity@users.noreply.github.com> Co-authored-by: antonva Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Anton Vilhelm Ásgeirsson Co-authored-by: Javier Viola Co-authored-by: eskimor Co-authored-by: Bastian Köcher --- node/core/backing/src/lib.rs | 42 +- node/core/backing/src/tests.rs | 120 +- .../src/validator_side/mod.rs | 1 + node/service/src/chain_spec.rs | 5 +- node/test/service/src/chain_spec.rs | 3 +- primitives/src/lib.rs | 3 +- primitives/src/v5/mod.rs | 75 +- runtime/kusama/src/lib.rs | 10 +- .../runtime_parachains_configuration.rs | 120 +- runtime/parachains/src/assigner.rs | 111 ++ .../src/assigner_on_demand/benchmarking.rs | 109 ++ .../src/assigner_on_demand/mock_helpers.rs | 86 ++ .../parachains/src/assigner_on_demand/mod.rs | 614 +++++++++ .../src/assigner_on_demand/tests.rs | 558 ++++++++ runtime/parachains/src/assigner_parachains.rs | 70 + runtime/parachains/src/builder.rs | 39 +- runtime/parachains/src/configuration.rs | 239 ++-- .../src/configuration/benchmarking.rs | 2 + .../parachains/src/configuration/migration.rs | 1 + .../src/configuration/migration/v7.rs | 97 +- .../src/configuration/migration/v8.rs | 319 +++++ runtime/parachains/src/configuration/tests.rs | 52 +- runtime/parachains/src/dmp.rs | 5 +- runtime/parachains/src/hrmp.rs | 12 +- runtime/parachains/src/hrmp/tests.rs | 18 +- runtime/parachains/src/inclusion/mod.rs | 48 +- runtime/parachains/src/inclusion/tests.rs | 91 +- runtime/parachains/src/initializer.rs | 3 + runtime/parachains/src/lib.rs | 3 + runtime/parachains/src/mock.rs | 29 +- runtime/parachains/src/paras/tests.rs | 3 +- runtime/parachains/src/paras_inherent/mod.rs | 26 +- .../parachains/src/paras_inherent/tests.rs | 44 +- runtime/parachains/src/runtime_api_impl/v5.rs | 101 +- runtime/parachains/src/scheduler.rs | 890 ++++++------ runtime/parachains/src/scheduler/common.rs | 98 ++ runtime/parachains/src/scheduler/migration.rs | 170 +++ runtime/parachains/src/scheduler/tests.rs | 1198 +++++++++-------- runtime/parachains/src/session_info/tests.rs | 2 +- runtime/polkadot/src/lib.rs | 10 +- .../runtime_parachains_configuration.rs | 170 +-- runtime/rococo/src/lib.rs | 34 +- runtime/rococo/src/weights/mod.rs | 1 + .../runtime_parachains_assigner_on_demand.rs | 91 ++ .../runtime_parachains_configuration.rs | 168 +-- runtime/test-runtime/src/lib.rs | 8 +- runtime/westend/src/lib.rs | 10 +- .../runtime_parachains_configuration.rs | 120 +- scripts/ci/gitlab/pipeline/zombienet.yml | 2 +- .../0003-parachains-garbage-candidate.zndsl | 2 +- zombienet_tests/misc/0003-parathreads.toml | 32 + .../smoke/0001-parachains-smoke-test.zndsl | 2 +- .../0002-parachains-upgrade-smoke-test.zndsl | 2 +- 53 files changed, 4196 insertions(+), 1873 deletions(-) create mode 100644 runtime/parachains/src/assigner.rs create mode 100644 runtime/parachains/src/assigner_on_demand/benchmarking.rs create mode 100644 runtime/parachains/src/assigner_on_demand/mock_helpers.rs create mode 100644 runtime/parachains/src/assigner_on_demand/mod.rs create mode 100644 runtime/parachains/src/assigner_on_demand/tests.rs create mode 100644 runtime/parachains/src/assigner_parachains.rs create mode 100644 runtime/parachains/src/configuration/migration/v8.rs create mode 100644 runtime/parachains/src/scheduler/common.rs create mode 100644 runtime/parachains/src/scheduler/migration.rs create mode 100644 
runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs create mode 100644 zombienet_tests/misc/0003-parathreads.toml diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 0abfbfad7657..ccfbb4e5145f 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -48,7 +48,7 @@ use polkadot_node_subsystem_util::{ request_validators, Validator, }; use polkadot_primitives::{ - BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId, + BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, PvfExecTimeoutKind, SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; @@ -354,7 +354,7 @@ async fn handle_active_leaves_update( let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some((scheduled.para_id, scheduled.collator)); + assignment = Some(scheduled.para_id); } groups.insert(scheduled.para_id, g.clone()); } @@ -363,15 +363,15 @@ async fn handle_active_leaves_update( let table_context = TableContext { groups, validators, validator }; - let (assignment, required_collator) = match assignment { + let assignment = match assignment { None => { assignments_span.add_string_tag("assigned", "false"); - (None, None) + None }, - Some((assignment, required_collator)) => { + Some(assignment) => { assignments_span.add_string_tag("assigned", "true"); assignments_span.add_para_id(assignment); - (Some(assignment), required_collator) + Some(assignment) }, }; @@ -381,7 +381,6 @@ async fn handle_active_leaves_update( let job = CandidateBackingJob { parent, assignment, - required_collator, issued_statements: HashSet::new(), awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), @@ -412,8 +411,6 @@ struct CandidateBackingJob { parent: Hash, /// The `ParaId` assigned to this validator assignment: Option, - /// The collator required to author the candidate, if any. - required_collator: Option, /// Spans for all candidates that are not yet backable. unbacked_candidates: HashMap, /// We issued `Seconded`, `Valid` or `Invalid` statements on about these candidates. @@ -913,21 +910,6 @@ impl CandidateBackingJob { candidate: &CandidateReceipt, pov: Arc, ) -> Result<(), Error> { - // Check that candidate is collated by the right collator. - if self - .required_collator - .as_ref() - .map_or(false, |c| c != &candidate.descriptor().collator) - { - // Break cycle - bounded as there is only one candidate to - // second per block. - ctx.send_unbounded_message(CollatorProtocolMessage::Invalid( - self.parent, - candidate.clone(), - )); - return Ok(()) - } - let candidate_hash = candidate.hash(); let mut span = self.get_unbacked_validation_child( root_span, @@ -1171,8 +1153,6 @@ impl CandidateBackingJob { return Ok(()) } - let descriptor = attesting.candidate.descriptor().clone(); - gum::debug!( target: LOG_TARGET, candidate_hash = ?candidate_hash, @@ -1180,16 +1160,6 @@ impl CandidateBackingJob { "Kicking off validation", ); - // Check that candidate is collated by the right collator. - if self.required_collator.as_ref().map_or(false, |c| c != &descriptor.collator) { - // If not, we've got the statement in the table but we will - // not issue validation work for it. - // - // Act as though we've issued a statement. 
- self.issued_statements.insert(candidate_hash); - return Ok(()) - } - let bg_sender = ctx.sender().clone(); let pov = PoVData::FetchFromValidator { from_validator: attesting.from_validator, diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 4be7516c58b4..1a2c044ccc66 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -31,8 +31,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, CollatorId, GroupRotationInfo, HeadData, PersistedValidationData, - PvfExecTimeoutKind, ScheduledCore, + CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecTimeoutKind, + ScheduledCore, }; use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; @@ -98,14 +98,10 @@ impl Default for TestState { let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; - let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let availability_cores = vec![ CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }), CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), - CoreState::Scheduled(ScheduledCore { - para_id: thread_a, - collator: Some(thread_collator.clone()), - }), + CoreState::Scheduled(ScheduledCore { para_id: thread_a, collator: None }), ]; let mut head_data = HashMap::new(); @@ -1186,116 +1182,6 @@ fn backing_works_after_failed_validation() { }); } -// Test that a `CandidateBackingMessage::Second` issues validation work -// and in case validation is successful issues a `StatementDistributionMessage`. -#[test] -fn backing_doesnt_second_wrong_collator() { - let mut test_state = TestState::default(); - test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(1), - collator: Some(Sr25519Keyring::Bob.public().into()), - }); - - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - - let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id: test_state.chain_ids[0], - relay_parent: test_state.relay_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), - } - .build(); - - let second = CandidateBackingMessage::Second( - test_state.relay_parent, - candidate.to_plain(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol( - CollatorProtocolMessage::Invalid(parent, c) - ) if parent == test_state.relay_parent && c == candidate.to_plain() => { - } - ); - - virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate::stop_work(test_state.relay_parent), - ))) - .await; - virtual_overseer - }); -} - -#[test] -fn validation_work_ignores_wrong_collator() { - let mut test_state = TestState::default(); - test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(1), - collator: Some(Sr25519Keyring::Bob.public().into()), - }); - - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; - - let pov = 
PoV { block_data: BlockData(vec![1, 2, 3]) }; - - let pov_hash = pov.hash(); - - let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); - - let candidate_a = TestCandidateBuilder { - para_id: test_state.chain_ids[0], - relay_parent: test_state.relay_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), - } - .build(); - - let public2 = Keystore::sr25519_generate_new( - &*test_state.keystore, - ValidatorId::ID, - Some(&test_state.validators[2].to_seed()), - ) - .expect("Insert key into keystore"); - let seconding = SignedFullStatement::sign( - &test_state.keystore, - Statement::Seconded(candidate_a.clone()), - &test_state.signing_context, - ValidatorIndex(2), - &public2.into(), - ) - .ok() - .flatten() - .expect("should be signed"); - - let statement = - CandidateBackingMessage::Statement(test_state.relay_parent, seconding.clone()); - - virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - - // The statement will be ignored because it has the wrong collator. - virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate::stop_work(test_state.relay_parent), - ))) - .await; - virtual_overseer - }); -} - #[test] fn candidate_backing_reorders_votes() { use sp_core::Encode; diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index b455285332be..f87a14971e8a 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -921,6 +921,7 @@ async fn process_incoming_peer_message( .span_per_relay_parent .get(&relay_parent) .map(|s| s.child("advertise-collation")); + if !state.view.contains(&relay_parent) { gum::debug!( target: LOG_TARGET, diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index 87a8650c2ed6..7e2d9c470450 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -211,8 +211,7 @@ fn default_parachains_host_configuration( max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, group_rotation_frequency: 20, - chain_availability_period: 4, - thread_availability_period: 4, + paras_availability_period: 4, max_upward_queue_count: 8, max_upward_queue_size: 1024 * 1024, max_downward_message_size: 1024 * 1024, @@ -223,10 +222,8 @@ fn default_parachains_host_configuration( hrmp_channel_max_capacity: 8, hrmp_channel_max_total_size: 8 * 1024, hrmp_max_parachain_inbound_channels: 4, - hrmp_max_parathread_inbound_channels: 4, hrmp_channel_max_message_size: 1024 * 1024, hrmp_max_parachain_outbound_channels: 4, - hrmp_max_parathread_outbound_channels: 4, hrmp_max_message_num_per_candidate: 5, dispute_period: 6, no_show_slots: 2, diff --git a/node/test/service/src/chain_spec.rs b/node/test/service/src/chain_spec.rs index 876bbb8806b4..9aadd7d203c0 100644 --- a/node/test/service/src/chain_spec.rs +++ b/node/test/service/src/chain_spec.rs @@ -175,8 +175,7 @@ fn polkadot_testnet_genesis( max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, group_rotation_frequency: 20, - chain_availability_period: 4, - thread_availability_period: 4, + paras_availability_period: 4, no_show_slots: 10, minimum_validation_upgrade_delay: 5, ..Default::default() diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs index 1c8ef1eae73b..3680cb857e66 100644 --- a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -56,7 +56,8 @@ pub use v5::{ 
UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, - MAX_POV_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, + MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, + PARACHAIN_KEY_TYPE_ID, }; #[cfg(feature = "std")] diff --git a/primitives/src/v5/mod.rs b/primitives/src/v5/mod.rs index bdd10e623190..c973bb05bb48 100644 --- a/primitives/src/v5/mod.rs +++ b/primitives/src/v5/mod.rs @@ -385,6 +385,11 @@ pub const MAX_HEAD_DATA_SIZE: u32 = 1 * 1024 * 1024; // NOTE: This value is used in the runtime so be careful when changing it. pub const MAX_POV_SIZE: u32 = 5 * 1024 * 1024; +/// Default queue size we use for the on-demand order book. +/// +/// Can be adjusted in configuration. +pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 10_000; + // The public key of a keypair used by a validator for determining assignments /// to approve included parachain candidates. mod assignment_app { @@ -809,28 +814,70 @@ impl TypeIndex for GroupIndex { } /// A claim on authoring the next block for a given parathread. -#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub struct ParathreadClaim(pub Id, pub CollatorId); +#[derive(Clone, Encode, Decode, TypeInfo, PartialEq, RuntimeDebug)] +pub struct ParathreadClaim(pub Id, pub Option); /// An entry tracking a claim to ensure it does not pass the maximum number of retries. -#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] +#[derive(Clone, Encode, Decode, TypeInfo, PartialEq, RuntimeDebug)] pub struct ParathreadEntry { /// The claim. pub claim: ParathreadClaim, - /// Number of retries. + /// Number of retries pub retries: u32, } +/// An assignment for a parachain scheduled to be backed and included in a relay chain block. +#[derive(Clone, Encode, Decode, PartialEq, TypeInfo, RuntimeDebug)] +pub struct Assignment { + /// Assignment's ParaId + pub para_id: Id, +} + +impl Assignment { + /// Create a new `Assignment`. + pub fn new(para_id: Id) -> Self { + Self { para_id } + } +} + +/// An entry tracking a paras +#[derive(Clone, Encode, Decode, TypeInfo, PartialEq, RuntimeDebug)] +pub struct ParasEntry { + /// The `Assignment` + pub assignment: Assignment, + /// The number of times the entry has timed out in availability. + pub availability_timeouts: u32, + /// The block height where this entry becomes invalid. + pub ttl: N, +} + +impl ParasEntry { + /// Return `Id` from the underlying `Assignment`. + pub fn para_id(&self) -> Id { + self.assignment.para_id + } + + /// Create a new `ParasEntry`. + pub fn new(assignment: Assignment, now: N) -> Self { + ParasEntry { assignment, availability_timeouts: 0, ttl: now } + } +} + /// What is occupying a specific availability core. #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(PartialEq))] -pub enum CoreOccupied { - /// A parathread. - Parathread(ParathreadEntry), - /// A parachain. - Parachain, +pub enum CoreOccupied { + /// The core is not occupied. + Free, + /// A paras. + Paras(ParasEntry), +} + +impl CoreOccupied { + /// Is core free? + pub fn is_free(&self) -> bool { + matches!(self, Self::Free) + } } /// A helper data-type for tracking validator-group rotations. 
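// ---------------------------------------------------------------------------
// A minimal, self-contained sketch (not taken from the patch) of how the new
// scheduling primitives added in the hunk above -- `Assignment`, `ParasEntry`
// and `CoreOccupied` -- fit together. The `Id`/`BlockNumber` aliases and the
// `<N>` block-number parameter are assumptions used only for illustration;
// the real definitions live in primitives/src/v5/mod.rs.
type Id = u32;          // stand-in for the real `ParaId`
type BlockNumber = u32; // stand-in for the runtime block number

#[derive(Clone, PartialEq, Debug)]
struct Assignment { para_id: Id }
impl Assignment {
    fn new(para_id: Id) -> Self { Self { para_id } }
}

#[derive(Clone, PartialEq, Debug)]
struct ParasEntry<N> {
    assignment: Assignment,
    // Number of availability timeouts (successor of the old `retries` counter).
    availability_timeouts: u32,
    // Block height at which the entry becomes invalid.
    ttl: N,
}
impl<N> ParasEntry<N> {
    // As in the diff, the second argument is stored directly as the TTL.
    fn new(assignment: Assignment, now: N) -> Self {
        ParasEntry { assignment, availability_timeouts: 0, ttl: now }
    }
    fn para_id(&self) -> Id { self.assignment.para_id }
}

#[derive(Clone, PartialEq, Debug)]
enum CoreOccupied<N> { Free, Paras(ParasEntry<N>) }
impl<N> CoreOccupied<N> {
    fn is_free(&self) -> bool { matches!(self, Self::Free) }
}

fn main() {
    // A claim for para 2000 entering the claim queue at block 10; the caller
    // (the scheduler, in the real code) is assumed to compute the expiry
    // height from the configured TTL.
    let now: BlockNumber = 10;
    let configured_ttl: BlockNumber = 5;
    let mut entry = ParasEntry::new(Assignment::new(2000), now + configured_ttl);
    assert_eq!(entry.para_id(), 2000);

    // Each availability timeout is tracked on the entry itself.
    entry.availability_timeouts += 1;

    // An availability core is either free or occupied by such an entry.
    let core = CoreOccupied::Paras(entry);
    assert!(!core.is_free());
}
// ---------------------------------------------------------------------------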
@@ -962,7 +1009,9 @@ impl OccupiedCore { pub struct ScheduledCore { /// The ID of a para scheduled. pub para_id: Id, - /// The collator required to author the block, if any. + /// DEPRECATED: see: https://github.com/paritytech/polkadot/issues/7575 + /// + /// Will be removed in a future version. pub collator: Option, } @@ -992,7 +1041,7 @@ impl CoreState { pub fn para_id(&self) -> Option { match self { Self::Occupied(ref core) => Some(core.para_id()), - Self::Scheduled(ScheduledCore { para_id, .. }) => Some(*para_id), + Self::Scheduled(core) => Some(core.para_id), Self::Free => None, } } diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 0248b02e12f6..ae1925e898a2 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -39,6 +39,7 @@ use scale_info::TypeInfo; use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*}; use runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -1166,7 +1167,11 @@ impl parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl parachains_scheduler::Config for Runtime {} +impl parachains_scheduler::Config for Runtime { + type AssignmentProvider = ParaAssignmentProvider; +} + +impl parachains_assigner_parachains::Config for Runtime {} impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; @@ -1470,6 +1475,7 @@ construct_runtime! { ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 61, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet, Storage} = 64, // Parachain Onboarding Pallets. Start indices at 70 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event} = 70, @@ -1538,6 +1544,8 @@ pub mod migrations { >, pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, + parachains_scheduler::migration::v1::MigrateToV1, + parachains_configuration::migration::v8::MigrateToV8, ); } diff --git a/runtime/kusama/src/weights/runtime_parachains_configuration.rs b/runtime/kusama/src/weights/runtime_parachains_configuration.rs index 077e9409076d..22609209c733 100644 --- a/runtime/kusama/src/weights/runtime_parachains_configuration.rs +++ b/runtime/kusama/src/weights/runtime_parachains_configuration.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=kusama-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=runtime_parachains::configuration // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::configuration +// --chain=kusama-dev // --header=./file_header.txt -// --output=./runtime/kusama/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,56 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. pub struct WeightInfo(PhantomData); impl runtime_parachains::configuration::WeightInfo for WeightInfo { - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_448_000 picoseconds. - Weight::from_parts(9_847_000, 0) + // Minimum execution time: 9_186_000 picoseconds. 
+ Weight::from_parts(9_567_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_493_000 picoseconds. - Weight::from_parts(9_882_000, 0) + // Minimum execution time: 9_388_000 picoseconds. + Weight::from_parts(9_723_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_512_000 picoseconds. - Weight::from_parts(9_883_000, 0) + // Minimum execution time: 9_264_000 picoseconds. 
+ Weight::from_parts(9_477_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_hrmp_open_request_ttl() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -108,34 +106,50 @@ impl runtime_parachains::configuration::WeightInfo for Weight::from_parts(2_000_000_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_452_000 picoseconds. - Weight::from_parts(9_821_000, 0) + // Minimum execution time: 9_282_000 picoseconds. + Weight::from_parts(9_641_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 10_107_000 picoseconds. - Weight::from_parts(10_553_000, 0) + // Minimum execution time: 9_937_000 picoseconds. 
+ Weight::from_parts(10_445_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_perbill() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_106_000 picoseconds. + Weight::from_parts(9_645_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/runtime/parachains/src/assigner.rs b/runtime/parachains/src/assigner.rs new file mode 100644 index 000000000000..55434da11f30 --- /dev/null +++ b/runtime/parachains/src/assigner.rs @@ -0,0 +1,111 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Polkadot multiplexing assignment provider. +//! Provides blockspace assignments for both bulk and on demand parachains. +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; + +use crate::{ + configuration, paras, + scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, +}; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + configuration::Config + paras::Config { + type ParachainsAssignmentProvider: AssignmentProvider>; + type OnDemandAssignmentProvider: AssignmentProvider>; + } +} + +// Aliases to make the impl more readable. +type ParachainAssigner = ::ParachainsAssignmentProvider; +type OnDemandAssigner = ::OnDemandAssignmentProvider; + +impl Pallet { + // Helper fn for the AssignmentProvider implementation. + // Assumes that the first allocation of cores is to bulk parachains. + // This function will return false if there are no cores assigned to the bulk parachain + // assigner. 
+ fn is_bulk_core(core_idx: &CoreIndex) -> bool { + let parachain_cores = + as AssignmentProvider>>::session_core_count(); + (0..parachain_cores).contains(&core_idx.0) + } +} + +impl AssignmentProvider> for Pallet { + fn session_core_count() -> u32 { + let parachain_cores = + as AssignmentProvider>>::session_core_count(); + let on_demand_cores = + as AssignmentProvider>>::session_core_count(); + + parachain_cores.saturating_add(on_demand_cores) + } + + /// Pops an `Assignment` from a specified `CoreIndex` + fn pop_assignment_for_core( + core_idx: CoreIndex, + concluded_para: Option, + ) -> Option { + if Pallet::::is_bulk_core(&core_idx) { + as AssignmentProvider>>::pop_assignment_for_core( + core_idx, + concluded_para, + ) + } else { + as AssignmentProvider>>::pop_assignment_for_core( + core_idx, + concluded_para, + ) + } + } + + fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment) { + if Pallet::::is_bulk_core(&core_idx) { + as AssignmentProvider>>::push_assignment_for_core( + core_idx, assignment, + ) + } else { + as AssignmentProvider>>::push_assignment_for_core( + core_idx, assignment, + ) + } + } + + fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig> { + if Pallet::::is_bulk_core(&core_idx) { + as AssignmentProvider>>::get_provider_config( + core_idx, + ) + } else { + as AssignmentProvider>>::get_provider_config( + core_idx, + ) + } + } +} diff --git a/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/runtime/parachains/src/assigner_on_demand/benchmarking.rs new file mode 100644 index 000000000000..42ca94d5185f --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -0,0 +1,109 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! On demand assigner pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::{Pallet, *}; +use crate::{ + configuration::{HostConfiguration, Pallet as ConfigurationPallet}, + paras::{Pallet as ParasPallet, ParaGenesisArgs, ParaKind, ParachainsCache}, + shared::Pallet as ParasShared, +}; + +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; +use sp_runtime::traits::Bounded; + +use primitives::{ + HeadData, Id as ParaId, SessionIndex, ValidationCode, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; + +// Constants for the benchmarking +const SESSION_INDEX: SessionIndex = 1; + +// Initialize a parathread for benchmarking. 
+pub fn init_parathread(para_id: ParaId) +where + T: Config + crate::paras::Config + crate::shared::Config, +{ + ParasShared::::set_session_index(SESSION_INDEX); + let mut config = HostConfiguration::default(); + config.on_demand_cores = 1; + ConfigurationPallet::::force_set_active_config(config); + let mut parachains = ParachainsCache::new(); + ParasPallet::::initialize_para_now( + &mut parachains, + para_id, + &ParaGenesisArgs { + para_kind: ParaKind::Parathread, + genesis_head: HeadData(vec![1, 2, 3, 4]), + validation_code: ValidationCode(vec![1, 2, 3, 4]), + }, + ); +} + +#[benchmarks] +mod benchmarks { + /// We want to fill the queue to the maximum, so exactly one more item fits. + const MAX_FILL_BENCH: u32 = ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE.saturating_sub(1); + + use super::*; + #[benchmark] + fn place_order_keep_alive(s: Linear<1, MAX_FILL_BENCH>) { + // Setup + let caller = whitelisted_caller(); + let para_id = ParaId::from(111u32); + init_parathread::(para_id); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let assignment = Assignment::new(para_id); + + for _ in 0..s { + Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) + .unwrap(); + } + + #[extrinsic_call] + _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) + } + + #[benchmark] + fn place_order_allow_death(s: Linear<1, MAX_FILL_BENCH>) { + // Setup + let caller = whitelisted_caller(); + let para_id = ParaId::from(111u32); + init_parathread::(para_id); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let assignment = Assignment::new(para_id); + + for _ in 0..s { + Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) + .unwrap(); + } + + #[extrinsic_call] + _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext( + crate::assigner_on_demand::mock_helpers::GenesisConfigBuilder::default().build() + ), + crate::mock::Test + ); +} diff --git a/runtime/parachains/src/assigner_on_demand/mock_helpers.rs b/runtime/parachains/src/assigner_on_demand/mock_helpers.rs new file mode 100644 index 000000000000..acfb24cbf194 --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/mock_helpers.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Helper functions for tests, also used in runtime-benchmarks. 
+ +#![cfg(test)] + +use super::*; + +use crate::{ + mock::MockGenesisConfig, + paras::{ParaGenesisArgs, ParaKind}, +}; + +use primitives::{Balance, HeadData, ValidationCode}; + +pub fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { ..Default::default() }, + }, + ..Default::default() + } +} + +#[derive(Debug)] +pub struct GenesisConfigBuilder { + pub on_demand_cores: u32, + pub on_demand_base_fee: Balance, + pub on_demand_fee_variability: Perbill, + pub on_demand_max_queue_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub onboarded_on_demand_chains: Vec, +} + +impl Default for GenesisConfigBuilder { + fn default() -> Self { + Self { + on_demand_cores: 10, + on_demand_base_fee: 10_000, + on_demand_fee_variability: Perbill::from_percent(1), + on_demand_max_queue_size: 100, + on_demand_target_queue_utilization: Perbill::from_percent(25), + onboarded_on_demand_chains: vec![], + } + } +} + +impl GenesisConfigBuilder { + pub(super) fn build(self) -> MockGenesisConfig { + let mut genesis = default_genesis_config(); + let config = &mut genesis.configuration.config; + config.on_demand_cores = self.on_demand_cores; + config.on_demand_base_fee = self.on_demand_base_fee; + config.on_demand_fee_variability = self.on_demand_fee_variability; + config.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + + let paras = &mut genesis.paras.paras; + for para_id in self.onboarded_on_demand_chains { + paras.push(( + para_id, + ParaGenesisArgs { + genesis_head: HeadData::from(vec![0u8]), + validation_code: ValidationCode::from(vec![0u8]), + para_kind: ParaKind::Parathread, + }, + )) + } + + genesis + } +} diff --git a/runtime/parachains/src/assigner_on_demand/mod.rs b/runtime/parachains/src/assigner_on_demand/mod.rs new file mode 100644 index 000000000000..5a60201e4fa8 --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/mod.rs @@ -0,0 +1,614 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The parachain on demand assignment module. +//! +//! Implements a mechanism for taking in orders for pay as you go (PAYG) or on demand +//! parachain (previously parathreads) assignments. This module is not handled by the +//! initializer but is instead instantiated in the `construct_runtime` macro. +//! +//! The module currently limits parallel execution of blocks from the same `ParaId` via +//! a core affinity mechanism. As long as there exists an affinity for a `CoreIndex` for +//! a specific `ParaId`, orders for blockspace for that `ParaId` will only be assigned to +//! that `CoreIndex`. This affinity mechanism can be removed if it can be shown that parallel +//! execution is valid. 
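For context, a minimal sketch of how a runtime could wire this pallet in through its configuration trait and `construct_runtime!`. The crate alias `parachains_assigner_on_demand`, the `Runtime` and `Balances` names, and the use of `TestWeightInfo` are assumptions for illustration only and are not part of this patch.

    // Hypothetical runtime-side wiring for the on demand assigner (sketch only).
    // Assumes `frame_support::parameter_types` and `sp_runtime::FixedU128` are in scope.
    parameter_types! {
        // Floor for the spot traffic multiplier; `SpotTraffic` never drops below this.
        pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1);
    }

    impl parachains_assigner_on_demand::Config for Runtime {
        type RuntimeEvent = RuntimeEvent;
        type Currency = Balances;
        type TrafficDefaultValue = OnDemandTrafficDefaultValue;
        // A production runtime would plug in benchmarked weights instead.
        type WeightInfo = parachains_assigner_on_demand::TestWeightInfo;
    }

    // The pallet is then listed as one more entry inside `construct_runtime!`,
    // e.g. `OnDemandAssignmentProvider: parachains_assigner_on_demand`.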
+ +mod benchmarking; +mod mock_helpers; + +#[cfg(test)] +mod tests; + +use crate::{ + configuration, paras, + scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, +}; + +use frame_support::{ + pallet_prelude::*, + traits::{ + Currency, + ExistenceRequirement::{self, AllowDeath, KeepAlive}, + WithdrawReasons, + }, +}; +use frame_system::pallet_prelude::*; +use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; +use sp_runtime::{ + traits::{One, SaturatedConversion}, + FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, +}; + +use sp_std::{collections::vec_deque::VecDeque, prelude::*}; + +const LOG_TARGET: &str = "runtime::parachains::assigner-on-demand"; + +pub use pallet::*; + +pub trait WeightInfo { + fn place_order_allow_death(s: u32) -> Weight; + fn place_order_keep_alive(s: u32) -> Weight; +} + +/// A weight info that is only suitable for testing. +pub struct TestWeightInfo; + +impl WeightInfo for TestWeightInfo { + fn place_order_allow_death(_: u32) -> Weight { + Weight::MAX + } + + fn place_order_keep_alive(_: u32) -> Weight { + Weight::MAX + } +} + +/// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a +/// specific `ParaId`. +#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub struct CoreAffinityCount { + core_idx: CoreIndex, + count: u32, +} + +/// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. +pub enum QueuePushDirection { + Back, + Front, +} + +/// Shorthand for the Balance type the runtime is using. +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// Errors that can happen during spot traffic calculation. +#[derive(PartialEq)] +#[cfg_attr(feature = "std", derive(Debug))] +pub enum SpotTrafficCalculationErr { + /// The order queue capacity is at 0. + QueueCapacityIsZero, + /// The queue size is larger than the queue capacity. + QueueSizeLargerThanCapacity, + /// Arithmetic error during division, either division by 0 or over/underflow. + Division, +} + +#[frame_support::pallet] +pub mod pallet { + + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + configuration::Config + paras::Config { + /// The runtime's definition of an event. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The runtime's definition of a Currency. + type Currency: Currency; + + /// Something that provides the weight of this pallet. + type WeightInfo: WeightInfo; + + /// The default value for the spot traffic multiplier. + #[pallet::constant] + type TrafficDefaultValue: Get; + } + + /// Creates an empty spot traffic value if one isn't present in storage already. + #[pallet::type_value] + pub fn SpotTrafficOnEmpty() -> FixedU128 { + T::TrafficDefaultValue::get() + } + + /// Creates an empty on demand queue if one isn't present in storage already. + #[pallet::type_value] + pub fn OnDemandQueueOnEmpty() -> VecDeque { + VecDeque::new() + } + + /// Keeps track of the multiplier used to calculate the current spot price for the on demand + /// assigner. + #[pallet::storage] + pub(super) type SpotTraffic = + StorageValue<_, FixedU128, ValueQuery, SpotTrafficOnEmpty>; + + /// The order storage entry. Uses a VecDeque to be able to push to the front of the + /// queue from the scheduler on session boundaries. 
+ #[pallet::storage] + pub type OnDemandQueue = + StorageValue<_, VecDeque, ValueQuery, OnDemandQueueOnEmpty>; + + /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in + /// it's lookahead. Keeping track of this affinity prevents parallel execution of the same + /// `ParaId` on two or more `CoreIndex`es. + #[pallet::storage] + pub(super) type ParaIdAffinity = + StorageMap<_, Twox256, ParaId, CoreAffinityCount, OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An order was placed at some spot price amount. + OnDemandOrderPlaced { para_id: ParaId, spot_price: BalanceOf }, + /// The value of the spot traffic multiplier changed. + SpotTrafficSet { traffic: FixedU128 }, + } + + #[pallet::error] + pub enum Error { + /// The `ParaId` supplied to the `place_order` call is not a valid `ParaThread`, making the + /// call is invalid. + InvalidParaId, + /// The order queue is full, `place_order` will not continue. + QueueFull, + /// The current spot price is higher than the max amount specified in the `place_order` + /// call, making it invalid. + SpotPriceHigherThanMaxAmount, + /// There are no on demand cores available. `place_order` will not add anything to the + /// queue. + NoOnDemandCores, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_now: BlockNumberFor) -> Weight { + let config = >::config(); + // Calculate spot price multiplier and store it. + let old_traffic = SpotTraffic::::get(); + match Self::calculate_spot_traffic( + old_traffic, + config.on_demand_queue_max_size, + Self::queue_size(), + config.on_demand_target_queue_utilization, + config.on_demand_fee_variability, + ) { + Ok(new_traffic) => { + // Only update storage on change + if new_traffic != old_traffic { + SpotTraffic::::set(new_traffic); + Pallet::::deposit_event(Event::::SpotTrafficSet { + traffic: new_traffic, + }); + return T::DbWeight::get().reads_writes(2, 1) + } + }, + Err(SpotTrafficCalculationErr::QueueCapacityIsZero) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: The order queue capacity is at 0." + ); + }, + Err(SpotTrafficCalculationErr::QueueSizeLargerThanCapacity) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: The queue size is larger than the queue capacity." + ); + }, + Err(SpotTrafficCalculationErr::Division) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: Arithmetic error during division, either division by 0 or over/underflow." + ); + }, + }; + T::DbWeight::get().reads_writes(2, 0) + } + } + + #[pallet::call] + impl Pallet { + /// Create a single on demand core order. + /// Will use the spot price for the current block and will reap the account if needed. + /// + /// Parameters: + /// - `origin`: The sender of the call, funds will be withdrawn from this account. + /// - `max_amount`: The maximum balance to withdraw from the origin to place an order. + /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. 
+ /// + /// Errors: + /// - `InsufficientBalance`: from the Currency implementation + /// - `InvalidParaId` + /// - `QueueFull` + /// - `SpotPriceHigherThanMaxAmount` + /// - `NoOnDemandCores` + /// + /// Events: + /// - `SpotOrderPlaced` + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::place_order_allow_death(OnDemandQueue::::get().len() as u32))] + pub fn place_order_allow_death( + origin: OriginFor, + max_amount: BalanceOf, + para_id: ParaId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + Pallet::::do_place_order(sender, max_amount, para_id, AllowDeath) + } + + /// Same as the [`place_order_allow_death`] call , but with a check that placing the order + /// will not reap the account. + /// + /// Parameters: + /// - `origin`: The sender of the call, funds will be withdrawn from this account. + /// - `max_amount`: The maximum balance to withdraw from the origin to place an order. + /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. + /// + /// Errors: + /// - `InsufficientBalance`: from the Currency implementation + /// - `InvalidParaId` + /// - `QueueFull` + /// - `SpotPriceHigherThanMaxAmount` + /// - `NoOnDemandCores` + /// + /// Events: + /// - `SpotOrderPlaced` + #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::place_order_keep_alive(OnDemandQueue::::get().len() as u32))] + pub fn place_order_keep_alive( + origin: OriginFor, + max_amount: BalanceOf, + para_id: ParaId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + Pallet::::do_place_order(sender, max_amount, para_id, KeepAlive) + } + } +} + +impl Pallet +where + BalanceOf: FixedPointOperand, +{ + /// Helper function for `place_order_*` calls. Used to differentiate between placing orders + /// with a keep alive check or to allow the account to be reaped. + /// + /// Parameters: + /// - `sender`: The sender of the call, funds will be withdrawn from this account. + /// - `max_amount`: The maximum balance to withdraw from the origin to place an order. + /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. + /// - `existence_requirement`: Whether or not to ensure that the account will not be reaped. + /// + /// Errors: + /// - `InsufficientBalance`: from the Currency implementation + /// - `InvalidParaId` + /// - `QueueFull` + /// - `SpotPriceHigherThanMaxAmount` + /// - `NoOnDemandCores` + /// + /// Events: + /// - `SpotOrderPlaced` + fn do_place_order( + sender: ::AccountId, + max_amount: BalanceOf, + para_id: ParaId, + existence_requirement: ExistenceRequirement, + ) -> DispatchResult { + let config = >::config(); + + // Are there any schedulable cores in this session + ensure!(config.on_demand_cores > 0, Error::::NoOnDemandCores); + + // Traffic always falls back to 1.0 + let traffic = SpotTraffic::::get(); + + // Calculate spot price + let spot_price: BalanceOf = + traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); + + // Is the current price higher than `max_amount` + ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); + + // Charge the sending account the spot price + T::Currency::withdraw(&sender, spot_price, WithdrawReasons::FEE, existence_requirement)?; + + let assignment = Assignment::new(para_id); + + let res = Pallet::::add_on_demand_assignment(assignment, QueuePushDirection::Back); + + match res { + Ok(_) => { + Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, spot_price }); + return Ok(()) + }, + Err(err) => return Err(err), + } + } + + /// The spot price multiplier. 
This is based on the transaction fee calculations defined in: + /// https://research.web3.foundation/Polkadot/overview/token-economics#setting-transaction-fees + /// + /// Parameters: + /// - `traffic`: The previously calculated multiplier, can never go below 1.0. + /// - `queue_capacity`: The max size of the order book. + /// - `queue_size`: How many orders are currently in the order book. + /// - `target_queue_utilisation`: How much of the queue_capacity should be ideally occupied, + /// expressed in percentages(perbill). + /// - `variability`: A variability factor, i.e. how quickly the spot price adjusts. This number + /// can be chosen by p/(k*(1-s)) where p is the desired ratio increase in spot price over k + /// number of blocks. s is the target_queue_utilisation. A concrete example: v = + /// 0.05/(20*(1-0.25)) = 0.0033. + /// + /// Returns: + /// - A `FixedU128` in the range of `Config::TrafficDefaultValue` - `FixedU128::MAX` on + /// success. + /// + /// Errors: + /// - `SpotTrafficCalculationErr::QueueCapacityIsZero` + /// - `SpotTrafficCalculationErr::QueueSizeLargerThanCapacity` + /// - `SpotTrafficCalculationErr::Division` + pub(crate) fn calculate_spot_traffic( + traffic: FixedU128, + queue_capacity: u32, + queue_size: u32, + target_queue_utilisation: Perbill, + variability: Perbill, + ) -> Result { + // Return early if queue has no capacity. + if queue_capacity == 0 { + return Err(SpotTrafficCalculationErr::QueueCapacityIsZero) + } + + // Return early if queue size is greater than capacity. + if queue_size > queue_capacity { + return Err(SpotTrafficCalculationErr::QueueSizeLargerThanCapacity) + } + + // (queue_size / queue_capacity) - target_queue_utilisation + let queue_util_ratio = FixedU128::from_rational(queue_size.into(), queue_capacity.into()); + let positive = queue_util_ratio >= target_queue_utilisation.into(); + let queue_util_diff = queue_util_ratio.max(target_queue_utilisation.into()) - + queue_util_ratio.min(target_queue_utilisation.into()); + + // variability * queue_util_diff + let var_times_qud = queue_util_diff.saturating_mul(variability.into()); + + // variability^2 * queue_util_diff^2 + let var_times_qud_pow = var_times_qud.saturating_mul(var_times_qud); + + // (variability^2 * queue_util_diff^2)/2 + let div_by_two: FixedU128; + match var_times_qud_pow.const_checked_div(2.into()) { + Some(dbt) => div_by_two = dbt, + None => return Err(SpotTrafficCalculationErr::Division), + } + + // traffic * (1 + queue_util_diff) + div_by_two + if positive { + let new_traffic = queue_util_diff + .saturating_add(div_by_two) + .saturating_add(One::one()) + .saturating_mul(traffic); + Ok(new_traffic.max(::TrafficDefaultValue::get())) + } else { + let new_traffic = queue_util_diff.saturating_sub(div_by_two).saturating_mul(traffic); + Ok(new_traffic.max(::TrafficDefaultValue::get())) + } + } + + /// Adds an assignment to the on demand queue. + /// + /// Paramenters: + /// - `assignment`: The on demand assignment to add to the queue. + /// - `location`: Whether to push this entry to the back or the front of the queue. Pushing an + /// entry to the front of the queue is only used when the scheduler wants to push back an + /// entry it has already popped. + /// Returns: + /// - The unit type on success. + /// + /// Errors: + /// - `InvalidParaId` + /// - `QueueFull` + pub fn add_on_demand_assignment( + assignment: Assignment, + location: QueuePushDirection, + ) -> Result<(), DispatchError> { + // Only parathreads are valid paraids for on the go parachains. 
+ ensure!(>::is_parathread(assignment.para_id), Error::::InvalidParaId); + + let config = >::config(); + + OnDemandQueue::::try_mutate(|queue| { + // Abort transaction if queue is too large + ensure!(Self::queue_size() < config.on_demand_queue_max_size, Error::::QueueFull); + match location { + QueuePushDirection::Back => queue.push_back(assignment), + QueuePushDirection::Front => queue.push_front(assignment), + }; + Ok(()) + }) + } + + /// Get the size of the on demand queue. + /// + /// Returns: + /// - The size of the on demand queue. + fn queue_size() -> u32 { + let config = >::config(); + match OnDemandQueue::::get().len().try_into() { + Ok(size) => return size, + Err(_) => { + log::debug!( + target: LOG_TARGET, + "Failed to fetch the on demand queue size, returning the max size." + ); + return config.on_demand_queue_max_size + }, + } + } + + /// Getter for the order queue. + pub fn get_queue() -> VecDeque { + OnDemandQueue::::get() + } + + /// Getter for the affinity tracker. + pub fn get_affinity_map(para_id: ParaId) -> Option { + ParaIdAffinity::::get(para_id) + } + + /// Decreases the affinity of a `ParaId` to a specified `CoreIndex`. + /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_idx + /// matches. When the count reaches 0, the entry is removed. + /// A non-existant entry is a no-op. + fn decrease_affinity(para_id: ParaId, core_idx: CoreIndex) { + ParaIdAffinity::::mutate(para_id, |maybe_affinity| { + if let Some(affinity) = maybe_affinity { + if affinity.core_idx == core_idx { + let new_count = affinity.count.saturating_sub(1); + if new_count > 0 { + *maybe_affinity = Some(CoreAffinityCount { core_idx, count: new_count }); + } else { + *maybe_affinity = None; + } + } + } + }); + } + + /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. + /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_idx matches. + /// A non-existant entry will be initialized with a count of 1 and uses the supplied + /// `CoreIndex`. + fn increase_affinity(para_id: ParaId, core_idx: CoreIndex) { + ParaIdAffinity::::mutate(para_id, |maybe_affinity| match maybe_affinity { + Some(affinity) => + if affinity.core_idx == core_idx { + *maybe_affinity = Some(CoreAffinityCount { + core_idx, + count: affinity.count.saturating_add(1), + }); + }, + None => { + *maybe_affinity = Some(CoreAffinityCount { core_idx, count: 1 }); + }, + }) + } +} + +impl AssignmentProvider> for Pallet { + fn session_core_count() -> u32 { + let config = >::config(); + config.on_demand_cores + } + + /// Take the next queued entry that is available for a given core index. + /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` + /// but only in [0..P] range slice of the order queue, where P is the element that is + /// removed from the order queue. + /// + /// Parameters: + /// - `core_idx`: The core index + /// - `previous_paraid`: Which paraid was previously processed on the requested core. Is None if + /// nothing was processed on the core. + fn pop_assignment_for_core( + core_idx: CoreIndex, + previous_para: Option, + ) -> Option { + // Only decrease the affinity of the previous para if it exists. + // A nonexistant `ParaId` indicates that the scheduler has not processed any + // `ParaId` this session. 
+ if let Some(previous_para_id) = previous_para { + Pallet::::decrease_affinity(previous_para_id, core_idx) + } + + let mut queue: VecDeque = OnDemandQueue::::get(); + + let mut invalidated_para_id_indexes: Vec = vec![]; + + // Get the position of the next `ParaId`. Select either a valid `ParaId` that has an + // affinity to the same `CoreIndex` as the scheduler asks for or a valid `ParaId` with no + // affinity at all. + let pos = queue.iter().enumerate().position(|(index, assignment)| { + if >::is_parathread(assignment.para_id) { + match ParaIdAffinity::::get(&assignment.para_id) { + Some(affinity) => return affinity.core_idx == core_idx, + None => return true, + } + } + // Record no longer valid para_ids. + invalidated_para_id_indexes.push(index); + return false + }); + + // Collect the popped value. + let popped = pos.and_then(|p: usize| { + if let Some(assignment) = queue.remove(p) { + Pallet::::increase_affinity(assignment.para_id, core_idx); + return Some(assignment) + }; + None + }); + + // Only remove the invalid indexes *after* using the index. + // Removed in reverse order so that the indexes don't shift. + invalidated_para_id_indexes.iter().rev().for_each(|idx| { + queue.remove(*idx); + }); + + // Write changes to storage. + OnDemandQueue::::set(queue); + + popped + } + + /// Push an assignment back to the queue. + /// Typically used on session boundaries. + /// Parameters: + /// - `core_idx`: The core index + /// - `assignment`: The on demand assignment. + fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment) { + Pallet::::decrease_affinity(assignment.para_id, core_idx); + // Skip the queue on push backs from scheduler + match Pallet::::add_on_demand_assignment(assignment, QueuePushDirection::Front) { + Ok(_) => {}, + Err(_) => {}, + } + } + + fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { + let config = >::config(); + AssignmentProviderConfig { + availability_period: config.paras_availability_period, + max_availability_timeouts: config.on_demand_retries, + ttl: config.on_demand_ttl, + } + } +} diff --git a/runtime/parachains/src/assigner_on_demand/tests.rs b/runtime/parachains/src/assigner_on_demand/tests.rs new file mode 100644 index 000000000000..8041179cd90c --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/tests.rs @@ -0,0 +1,558 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
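For reference alongside the spot traffic tests below: writing u = queue_size / queue_capacity, t = target_queue_utilisation, d = |u - t| and v = variability, the update performed by `calculate_spot_traffic` above can be summarised (a sketch of the arithmetic, ignoring saturation and clamped from below by `TrafficDefaultValue`) as:

    traffic_new = traffic * (1 + d + (v*d)^2 / 2)   if u >= t
    traffic_new = traffic * (d - (v*d)^2 / 2)       if u <  t

As a worked check against `sustained_target_increases_spot_traffic`: capacity 100, queue size 12, target 10% and variability 100% give d = 0.02, so each block multiplies traffic by 1 + 0.02 + 0.0002 = 1.0202; over 50 blocks that compounds to 1.0202^50 ≈ 2.718, which is the constant the test asserts up to fixed point rounding.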
+ +use super::*; + +use crate::{ + assigner_on_demand::{mock_helpers::GenesisConfigBuilder, Error}, + initializer::SessionChangeNotification, + mock::{ + new_test_ext, Balances, OnDemandAssigner, Paras, ParasShared, RuntimeOrigin, Scheduler, + System, Test, + }, + paras::{ParaGenesisArgs, ParaKind}, +}; +use frame_support::{assert_noop, assert_ok, error::BadOrigin}; +use pallet_balances::Error as BalancesError; +use primitives::{ + v5::{Assignment, ValidationCode}, + BlockNumber, SessionIndex, +}; +use sp_std::collections::btree_map::BTreeMap; + +fn schedule_blank_para(id: ParaId, parakind: ParaKind) { + let validation_code: ValidationCode = vec![1, 2, 3].into(); + assert_ok!(Paras::schedule_para_initialize( + id, + ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: validation_code.clone(), + para_kind: parakind, + } + )); + + assert_ok!(Paras::add_trusted_validation_code(RuntimeOrigin::root(), validation_code)); +} + +fn run_to_block( + to: BlockNumber, + new_session: impl Fn(BlockNumber) -> Option>, +) { + while System::block_number() < to { + let b = System::block_number(); + + Scheduler::initializer_finalize(); + Paras::initializer_finalize(b); + + if let Some(notification) = new_session(b + 1) { + let mut notification_with_session_index = notification; + // We will make every session change trigger an action queue. Normally this may require + // 2 or more session changes. + if notification_with_session_index.session_index == SessionIndex::default() { + notification_with_session_index.session_index = ParasShared::scheduled_session(); + } + Paras::initializer_on_new_session(¬ification_with_session_index); + Scheduler::initializer_on_new_session(¬ification_with_session_index); + } + + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + Paras::initializer_initialize(b + 1); + Scheduler::initializer_initialize(b + 1); + + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. 
+ Scheduler::update_claimqueue(BTreeMap::new(), b + 1); + } +} + +#[test] +fn spot_traffic_capacity_zero_returns_none() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from(u128::MAX), + 0u32, + u32::MAX, + Perbill::from_percent(100), + Perbill::from_percent(1), + ) { + Ok(_) => panic!("Error"), + Err(e) => assert_eq!(e, SpotTrafficCalculationErr::QueueCapacityIsZero), + }; +} + +#[test] +fn spot_traffic_queue_size_larger_than_capacity_returns_none() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from(u128::MAX), + 1u32, + 2u32, + Perbill::from_percent(100), + Perbill::from_percent(1), + ) { + Ok(_) => panic!("Error"), + Err(e) => assert_eq!(e, SpotTrafficCalculationErr::QueueSizeLargerThanCapacity), + } +} + +#[test] +fn spot_traffic_calculation_identity() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from_u32(1), + 1000, + 100, + Perbill::from_percent(10), + Perbill::from_percent(3), + ) { + Ok(res) => { + assert_eq!(res, FixedU128::from_u32(1)) + }, + _ => (), + } +} + +#[test] +fn spot_traffic_calculation_u32_max() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from_u32(1), + u32::MAX, + u32::MAX, + Perbill::from_percent(100), + Perbill::from_percent(3), + ) { + Ok(res) => { + assert_eq!(res, FixedU128::from_u32(1)) + }, + _ => panic!("Error"), + }; +} + +#[test] +fn spot_traffic_calculation_u32_traffic_max() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from(u128::MAX), + u32::MAX, + u32::MAX, + Perbill::from_percent(1), + Perbill::from_percent(1), + ) { + Ok(res) => assert_eq!(res, FixedU128::from(u128::MAX)), + _ => panic!("Error"), + }; +} + +#[test] +fn sustained_target_increases_spot_traffic() { + let mut traffic = FixedU128::from_u32(1u32); + for _ in 0..50 { + traffic = OnDemandAssigner::calculate_spot_traffic( + traffic, + 100, + 12, + Perbill::from_percent(10), + Perbill::from_percent(100), + ) + .unwrap() + } + assert_eq!(traffic, FixedU128::from_inner(2_718_103_312_071_174_015u128)) +} + +#[test] +fn spot_traffic_can_decrease() { + let traffic = FixedU128::from_u32(100u32); + match OnDemandAssigner::calculate_spot_traffic( + traffic, + 100u32, + 0u32, + Perbill::from_percent(100), + Perbill::from_percent(100), + ) { + Ok(new_traffic) => + assert_eq!(new_traffic, FixedU128::from_inner(50_000_000_000_000_000_000u128)), + _ => panic!("Error"), + } +} + +#[test] +fn spot_traffic_decreases_over_time() { + let mut traffic = FixedU128::from_u32(100u32); + for _ in 0..5 { + traffic = OnDemandAssigner::calculate_spot_traffic( + traffic, + 100u32, + 0u32, + Perbill::from_percent(100), + Perbill::from_percent(100), + ) + .unwrap(); + println!("{traffic}"); + } + assert_eq!(traffic, FixedU128::from_inner(3_125_000_000_000_000_000u128)) +} + +#[test] +fn place_order_works() { + let alice = 1u64; + let amt = 10_000_000u128; + let para_id = ParaId::from(111); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. 
+ schedule_blank_para(para_id, ParaKind::Parathread); + + assert!(!Paras::is_parathread(para_id)); + + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + + assert!(Paras::is_parathread(para_id)); + + // Does not work unsigned + assert_noop!( + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::none(), amt, para_id), + BadOrigin + ); + + // Does not work with max_amount lower than fee + let low_max_amt = 1u128; + assert_noop!( + OnDemandAssigner::place_order_allow_death( + RuntimeOrigin::signed(alice), + low_max_amt, + para_id, + ), + Error::::SpotPriceHigherThanMaxAmount, + ); + + // Does not work with insufficient balance + assert_noop!( + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id), + BalancesError::::InsufficientBalance + ); + + // Works + Balances::make_free_balance_be(&alice, amt); + run_to_block(101, |n| if n == 101 { Some(Default::default()) } else { None }); + assert_ok!(OnDemandAssigner::place_order_allow_death( + RuntimeOrigin::signed(alice), + amt, + para_id + )); + }); +} + +#[test] +fn place_order_keep_alive_keeps_alive() { + let alice = 1u64; + let amt = 1u128; // The same as crate::mock's EXISTENTIAL_DEPOSIT + let max_amt = 10_000_000u128; + let para_id = ParaId::from(111); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. + schedule_blank_para(para_id, ParaKind::Parathread); + Balances::make_free_balance_be(&alice, amt); + + assert!(!Paras::is_parathread(para_id)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + assert_noop!( + OnDemandAssigner::place_order_keep_alive( + RuntimeOrigin::signed(alice), + max_amt, + para_id + ), + BalancesError::::InsufficientBalance + ); + }); +} + +#[test] +fn add_on_demand_assignment_works() { + let para_a = ParaId::from(111); + let assignment = Assignment::new(para_a); + + let mut genesis = GenesisConfigBuilder::default(); + genesis.on_demand_max_queue_size = 1; + new_test_ext(genesis.build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. + schedule_blank_para(para_a, ParaKind::Parathread); + + // `para_a` is not onboarded as a parathread yet. + assert_noop!( + OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + ), + Error::::InvalidParaId + ); + + assert!(!Paras::is_parathread(para_a)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_a)); + + // `para_a` is now onboarded as a valid parathread. + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + + // Max queue size is 1, queue should be full. 
+ assert_noop!( + OnDemandAssigner::add_on_demand_assignment(assignment, QueuePushDirection::Back), + Error::::QueueFull + ); + }); +} + +#[test] +fn spotqueue_push_directions() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(222); + let para_c = ParaId::from(333); + + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + schedule_blank_para(para_c, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let assignment_a = Assignment { para_id: para_a }; + let assignment_b = Assignment { para_id: para_b }; + let assignment_c = Assignment { para_id: para_c }; + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Front + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b.clone(), + QueuePushDirection::Front + )); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_c.clone(), + QueuePushDirection::Back + )); + + assert_eq!(OnDemandAssigner::queue_size(), 3); + assert_eq!( + OnDemandAssigner::get_queue(), + VecDeque::from(vec![assignment_b, assignment_a, assignment_c]) + ) + }); +} + +#[test] +fn affinity_changes_work() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + schedule_blank_para(para_a, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let assignment_a = Assignment { para_id: para_a }; + // There should be no affinity before starting. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + + // Add enough assignments to the order queue. + for _ in 0..10 { + OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Front, + ) + .expect("Invalid paraid or queue full"); + } + + // There should be no affinity before the scheduler pops. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + + // Affinity count is 1 after popping. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); + + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + + // Affinity count is 1 after popping with a previous para. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); + assert_eq!(OnDemandAssigner::queue_size(), 8); + + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + } + + // Affinity count is 4 after popping 3 times without a previous para. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); + assert_eq!(OnDemandAssigner::queue_size(), 5); + + for _ in 0..5 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + } + + // Affinity count should still be 4 but queue should be empty. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); + assert_eq!(OnDemandAssigner::queue_size(), 0); + + // Pop 4 times and get to exactly 0 (None) affinity. + for _ in 0..4 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + } + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + + // Decreasing affinity beyond 0 should still be None. 
+ OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + }); +} + +#[test] +fn affinity_prohibits_parallel_scheduling() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(222); + + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let assignment_a = Assignment { para_id: para_a }; + let assignment_b = Assignment { para_id: para_b }; + + // There should be no affinity before starting. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); + + // Add 2 assignments for para_a for every para_b. + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_b.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + assert_eq!(OnDemandAssigner::queue_size(), 3); + + // Approximate having 1 core. + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + } + + // Affinity on one core is meaningless. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!( + OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, + OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx + ); + + // Clear affinity + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a)); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_b)); + + // Add 2 assignments for para_a for every para_b. + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_b.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + // Approximate having 2 cores. + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1), None); + } + + // Affinity should be the same as before, but on different cores. 
+ assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx, CoreIndex(1)); + }); +} + +#[test] +fn cannot_place_order_when_no_on_demand_cores() { + let mut genesis = GenesisConfigBuilder::default(); + genesis.on_demand_cores = 0; + let para_id = ParaId::from(10); + let alice = 1u64; + let amt = 10_000_000u128; + + new_test_ext(genesis.build()).execute_with(|| { + schedule_blank_para(para_id, ParaKind::Parathread); + Balances::make_free_balance_be(&alice, amt); + + assert!(!Paras::is_parathread(para_id)); + + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + + assert!(Paras::is_parathread(para_id)); + + assert_noop!( + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id), + Error::::NoOnDemandCores + ); + }); +} + +#[test] +fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { + let para_id = ParaId::from(10); + let assignment = Assignment { para_id }; + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Register the para_id as a parathread + schedule_blank_para(para_id, ParaKind::Parathread); + + assert!(!Paras::is_parathread(para_id)); + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + // Add two assignments for a para_id with a valid lifecycle. + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + + // First pop is fine + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None) == Some(assignment)); + + // Deregister para + assert_ok!(Paras::schedule_para_cleanup(para_id)); + + // Run to new session and verify that para_id is no longer a valid parathread. + assert!(Paras::is_parathread(para_id)); + run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); + assert!(!Paras::is_parathread(para_id)); + + // Second pop should be None. + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_id)) == None); + }); +} diff --git a/runtime/parachains/src/assigner_parachains.rs b/runtime/parachains/src/assigner_parachains.rs new file mode 100644 index 000000000000..9a6b970597d5 --- /dev/null +++ b/runtime/parachains/src/assigner_parachains.rs @@ -0,0 +1,70 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The bulk (parachain slot auction) blockspace assignment provider. +//! This provider is tightly coupled with the configuration and paras modules. 
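For orientation, the multiplexing assigner introduced earlier in this patch assumes that bulk (lease) cores occupy the low core indices and that on demand cores follow. A small illustrative sketch of that split, with made-up numbers (5 lease parachains, `on_demand_cores = 3`):

    // Illustrative only: mirrors the `is_bulk_core` routing in assigner.rs.
    fn routes_to_bulk(core_idx: u32, bulk_cores: u32) -> bool {
        // Bulk cores occupy indices 0..bulk_cores.
        (0..bulk_cores).contains(&core_idx)
    }

    fn main() {
        let bulk_cores = 5; // e.g. `paras::parachains().len()`
        let on_demand_cores = 3; // e.g. `config.on_demand_cores`
        assert_eq!(bulk_cores + on_demand_cores, 8); // total `session_core_count`
        assert!(routes_to_bulk(4, bulk_cores)); // cores 0..=4 go to the bulk provider
        assert!(!routes_to_bulk(5, bulk_cores)); // cores 5..=7 go to the on demand provider
    }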
+ +use crate::{ + configuration, paras, + scheduler::common::{AssignmentProvider, AssignmentProviderConfig}, +}; +use frame_system::pallet_prelude::BlockNumberFor; +pub use pallet::*; +use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + configuration::Config + paras::Config {} +} + +impl AssignmentProvider> for Pallet { + fn session_core_count() -> u32 { + >::parachains().len() as u32 + } + + fn pop_assignment_for_core( + core_idx: CoreIndex, + _concluded_para: Option, + ) -> Option { + >::parachains() + .get(core_idx.0 as usize) + .copied() + .map(|para_id| Assignment::new(para_id)) + } + + /// Bulk assignment has no need to push the assignment back on a session change, + /// this is a no-op in the case of a bulk assignment slot. + fn push_assignment_for_core(_: CoreIndex, _: Assignment) {} + + fn get_provider_config(_core_idx: CoreIndex) -> AssignmentProviderConfig> { + let config = >::config(); + AssignmentProviderConfig { + availability_period: config.paras_availability_period, + // The next assignment already goes to the same [`ParaId`], no timeout tracking needed. + max_availability_timeouts: 0, + // The next assignment already goes to the same [`ParaId`], this can be any number + // that's high enough to clear the time it takes to clear backing/availability. + ttl: BlockNumberFor::::from(10u32), + } + } +} diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs index 3f95b2087e6c..4921af5bedda 100644 --- a/runtime/parachains/src/builder.rs +++ b/runtime/parachains/src/builder.rs @@ -17,20 +17,22 @@ use crate::{ configuration, inclusion, initializer, paras, paras::ParaKind, - paras_inherent::{self}, - scheduler, session_info, shared, + paras_inherent, + scheduler::{self, common::AssignmentProviderConfig}, + session_info, shared, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, AvailabilityBitfield, BackedCandidate, CandidateCommitments, - CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, - CompactStatement, CoreIndex, CoreOccupied, DisputeStatement, DisputeStatementSet, GroupIndex, - HeadData, Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, - InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, SigningContext, - UncheckedSigned, ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, - ValidityAttestation, + collator_signature_payload, + v5::{Assignment, ParasEntry}, + AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, + CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, + CoreIndex, CoreOccupied, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData, + Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, InvalidDisputeStatementKind, + PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned, + ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation, }; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -689,13 +691,22 @@ impl BenchBuilder { ); assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); - // Mark all the used cores as occupied. 
We expect that their are + // Mark all the used cores as occupied. We expect that there are // `backed_and_concluding_cores` that are pending availability and that there are // `used_cores - backed_and_concluding_cores ` which are about to be disputed. - scheduler::AvailabilityCores::::set(vec![ - Some(CoreOccupied::Parachain); - used_cores as usize - ]); + let now = >::block_number() + One::one(); + let cores = (0..used_cores) + .into_iter() + .map(|i| { + let AssignmentProviderConfig { ttl, .. } = + scheduler::Pallet::::assignment_provider_config(CoreIndex(i)); + CoreOccupied::Paras(ParasEntry::new( + Assignment::new(ParaId::from(i as u32)), + now + ttl, + )) + }) + .collect(); + scheduler::AvailabilityCores::::set(cores); Bench:: { data: ParachainsInherentData { diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 0631b280aadd..03d1ae420495 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -25,9 +25,9 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_parachain::primitives::{MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM}; use primitives::{ vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, MAX_CODE_SIZE, - MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; -use sp_runtime::traits::Zero; +use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; #[cfg(test)] @@ -42,7 +42,7 @@ pub use pallet::*; const LOG_TARGET: &str = "runtime::configuration"; -/// All configuration of the runtime with respect to parachains and parathreads. +/// All configuration of the runtime with respect to paras. #[derive( Clone, Encode, @@ -113,10 +113,9 @@ pub struct HostConfiguration { /// been completed. /// /// Note, there are situations in which `expected_at` in the past. For example, if - /// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set - /// by this field or if PVF pre-check took more time than the delay. In such cases, the upgrade - /// is further at the earliest possible time determined by - /// [`minimum_validation_upgrade_delay`]. + /// [`paras_availability_period`] is less than the delay set by + /// this field or if PVF pre-check took more time than the delay. In such cases, the upgrade is + /// further at the earliest possible time determined by [`minimum_validation_upgrade_delay`]. /// /// The rationale for this delay has to do with relay-chain reversions. In case there is an /// invalid candidate produced with the new version of the code, then the relay-chain can @@ -143,8 +142,6 @@ pub struct HostConfiguration { pub max_downward_message_size: u32, /// The maximum number of outbound HRMP channels a parachain is allowed to open. pub hrmp_max_parachain_outbound_channels: u32, - /// The maximum number of outbound HRMP channels a parathread is allowed to open. - pub hrmp_max_parathread_outbound_channels: u32, /// The deposit that the sender should provide for opening an HRMP channel. pub hrmp_sender_deposit: Balance, /// The deposit that the recipient should provide for accepting opening an HRMP channel. @@ -155,8 +152,6 @@ pub struct HostConfiguration { pub hrmp_channel_max_total_size: u32, /// The maximum number of inbound HRMP channels a parachain is allowed to accept. pub hrmp_max_parachain_inbound_channels: u32, - /// The maximum number of inbound HRMP channels a parathread is allowed to accept. 
- pub hrmp_max_parathread_inbound_channels: u32, /// The maximum size of a message that could ever be put into an HRMP channel. /// /// This parameter affects the upper bound of size of `CandidateCommitments`. @@ -171,26 +166,34 @@ pub struct HostConfiguration { /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes /// have concluded. pub code_retention_period: BlockNumber, - /// The amount of execution cores to dedicate to parathread execution. - pub parathread_cores: u32, - /// The number of retries that a parathread author has to submit their block. - pub parathread_retries: u32, + /// The amount of execution cores to dedicate to on demand execution. + pub on_demand_cores: u32, + /// The number of retries that a on demand author has to submit their block. + pub on_demand_retries: u32, + /// The maximum queue size of the pay as you go module. + pub on_demand_queue_max_size: u32, + /// The target utilization of the spot price queue in percentages. + pub on_demand_target_queue_utilization: Perbill, + /// How quickly the fee rises in reaction to increased utilization. + /// The lower the number the slower the increase. + pub on_demand_fee_variability: Perbill, + /// The minimum amount needed to claim a slot in the spot pricing queue. + pub on_demand_base_fee: Balance, + /// The number of blocks an on demand claim stays in the scheduler's claimqueue before getting + /// cleared. This number should go reasonably higher than the number of blocks in the async + /// backing lookahead. + pub on_demand_ttl: BlockNumber, /// How often parachain groups should be rotated across parachains. /// /// Must be non-zero. pub group_rotation_frequency: BlockNumber, - /// The availability period, in blocks, for parachains. This is the amount of blocks + /// The availability period, in blocks. This is the amount of blocks /// after inclusion that validators have to make the block available and signal its /// availability to the chain. /// /// Must be at least 1. - pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads. Same as the - /// `chain_availability_period`, but a differing timeout due to differing requirements. - /// - /// Must be at least 1. - pub thread_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule parachains and parathreads. + pub paras_availability_period: BlockNumber, + /// The amount of blocks ahead to schedule paras. pub scheduling_lookahead: u32, /// The maximum number of validators to have per core. /// @@ -237,8 +240,7 @@ pub struct HostConfiguration { /// To prevent that, we introduce the minimum number of blocks after which the upgrade can be /// scheduled. This number is controlled by this field. /// - /// This value should be greater than [`chain_availability_period`] and - /// [`thread_availability_period`]. + /// This value should be greater than [`paras_availability_period`]. pub minimum_validation_upgrade_delay: BlockNumber, } @@ -250,8 +252,7 @@ impl> Default for HostConfiguration> Default for HostConfiguration> Default for HostConfiguration> Default for HostConfiguration { /// `group_rotation_frequency` is set to zero. ZeroGroupRotationFrequency, - /// `chain_availability_period` is set to zero. - ZeroChainAvailabilityPeriod, - /// `thread_availability_period` is set to zero. - ZeroThreadAvailabilityPeriod, + /// `paras_availability_period` is set to zero. + ZeroParasAvailabilityPeriod, /// `no_show_slots` is set to zero. 
ZeroNoShowSlots, /// `max_code_size` exceeds the hard limit of `MAX_CODE_SIZE`. @@ -309,15 +311,10 @@ pub enum InconsistentError { MaxHeadDataSizeExceedHardLimit { max_head_data_size: u32 }, /// `max_pov_size` exceeds the hard limit of `MAX_POV_SIZE`. MaxPovSizeExceedHardLimit { max_pov_size: u32 }, - /// `minimum_validation_upgrade_delay` is less than `chain_availability_period`. + /// `minimum_validation_upgrade_delay` is less than `paras_availability_period`. MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod { minimum_validation_upgrade_delay: BlockNumber, - chain_availability_period: BlockNumber, - }, - /// `minimum_validation_upgrade_delay` is less than `thread_availability_period`. - MinimumValidationUpgradeDelayLessThanThreadAvailabilityPeriod { - minimum_validation_upgrade_delay: BlockNumber, - thread_availability_period: BlockNumber, + paras_availability_period: BlockNumber, }, /// `validation_upgrade_delay` is less than or equal 1. ValidationUpgradeDelayIsTooLow { validation_upgrade_delay: BlockNumber }, @@ -349,12 +346,8 @@ where return Err(ZeroGroupRotationFrequency) } - if self.chain_availability_period.is_zero() { - return Err(ZeroChainAvailabilityPeriod) - } - - if self.thread_availability_period.is_zero() { - return Err(ZeroThreadAvailabilityPeriod) + if self.paras_availability_period.is_zero() { + return Err(ZeroParasAvailabilityPeriod) } if self.no_show_slots.is_zero() { @@ -375,15 +368,10 @@ where return Err(MaxPovSizeExceedHardLimit { max_pov_size: self.max_pov_size }) } - if self.minimum_validation_upgrade_delay <= self.chain_availability_period { + if self.minimum_validation_upgrade_delay <= self.paras_availability_period { return Err(MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod { minimum_validation_upgrade_delay: self.minimum_validation_upgrade_delay.clone(), - chain_availability_period: self.chain_availability_period.clone(), - }) - } else if self.minimum_validation_upgrade_delay <= self.thread_availability_period { - return Err(MinimumValidationUpgradeDelayLessThanThreadAvailabilityPeriod { - minimum_validation_upgrade_delay: self.minimum_validation_upgrade_delay.clone(), - thread_availability_period: self.thread_availability_period.clone(), + paras_availability_period: self.paras_availability_period.clone(), }) } @@ -442,6 +430,7 @@ pub trait WeightInfo { fn set_config_with_balance() -> Weight; fn set_hrmp_open_request_ttl() -> Weight; fn set_config_with_executor_params() -> Weight; + fn set_config_with_perbill() -> Weight; } pub struct TestWeightInfo; @@ -464,6 +453,9 @@ impl WeightInfo for TestWeightInfo { fn set_config_with_executor_params() -> Weight { Weight::MAX } + fn set_config_with_perbill() -> Weight { + Weight::MAX + } } #[frame_support::pallet] @@ -481,7 +473,8 @@ pub mod pallet { /// + /// v5-v6: (remove UMP dispatch queue) /// v6-v7: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(7); + /// v7-v8: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -626,29 +619,29 @@ pub mod pallet { }) } - /// Set the number of parathread execution cores. + /// Set the number of on demand execution cores. 
#[pallet::call_index(6)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), DispatchClass::Operational, ))] - pub fn set_parathread_cores(origin: OriginFor, new: u32) -> DispatchResult { + pub fn set_on_demand_cores(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.parathread_cores = new; + config.on_demand_cores = new; }) } - /// Set the number of retries for a particular parathread. + /// Set the number of retries for a particular on demand. #[pallet::call_index(7)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), DispatchClass::Operational, ))] - pub fn set_parathread_retries(origin: OriginFor, new: u32) -> DispatchResult { + pub fn set_on_demand_retries(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.parathread_retries = new; + config.on_demand_retries = new; }) } @@ -668,35 +661,19 @@ pub mod pallet { }) } - /// Set the availability period for parachains. + /// Set the availability period for paras. #[pallet::call_index(9)] #[pallet::weight(( T::WeightInfo::set_config_with_block_number(), DispatchClass::Operational, ))] - pub fn set_chain_availability_period( - origin: OriginFor, - new: BlockNumberFor, - ) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.chain_availability_period = new; - }) - } - - /// Set the availability period for parathreads. - #[pallet::call_index(10)] - #[pallet::weight(( - T::WeightInfo::set_config_with_block_number(), - DispatchClass::Operational, - ))] - pub fn set_thread_availability_period( + pub fn set_paras_availability_period( origin: OriginFor, new: BlockNumberFor, ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.thread_availability_period = new; + config.paras_availability_period = new; }) } @@ -989,22 +966,6 @@ pub mod pallet { }) } - /// Sets the maximum number of inbound HRMP channels a parathread is allowed to accept. - #[pallet::call_index(35)] - #[pallet::weight(( - T::WeightInfo::set_config_with_u32(), - DispatchClass::Operational, - ))] - pub fn set_hrmp_max_parathread_inbound_channels( - origin: OriginFor, - new: u32, - ) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.hrmp_max_parathread_inbound_channels = new; - }) - } - /// Sets the maximum size of a message that could ever be put into an HRMP channel. #[pallet::call_index(36)] #[pallet::weight(( @@ -1034,22 +995,6 @@ pub mod pallet { }) } - /// Sets the maximum number of outbound HRMP channels a parathread is allowed to open. - #[pallet::call_index(38)] - #[pallet::weight(( - T::WeightInfo::set_config_with_u32(), - DispatchClass::Operational, - ))] - pub fn set_hrmp_max_parathread_outbound_channels( - origin: OriginFor, - new: u32, - ) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.hrmp_max_parathread_outbound_channels = new; - }) - } - /// Sets the maximum number of outbound HRMP messages can be sent by a candidate. #[pallet::call_index(39)] #[pallet::weight(( @@ -1139,6 +1084,72 @@ pub mod pallet { config.executor_params = new; }) } + + /// Set the on demand (parathreads) base fee. 
+ #[pallet::call_index(47)] + #[pallet::weight(( + T::WeightInfo::set_config_with_balance(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_base_fee(origin: OriginFor, new: Balance) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_base_fee = new; + }) + } + + /// Set the on demand (parathreads) fee variability. + #[pallet::call_index(48)] + #[pallet::weight(( + T::WeightInfo::set_config_with_perbill(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_fee_variability(origin: OriginFor, new: Perbill) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_fee_variability = new; + }) + } + + /// Set the on demand (parathreads) queue max size. + #[pallet::call_index(49)] + #[pallet::weight(( + T::WeightInfo::set_config_with_option_u32(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_queue_max_size(origin: OriginFor, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_queue_max_size = new; + }) + } + /// Set the on demand (parathreads) fee variability. + #[pallet::call_index(50)] + #[pallet::weight(( + T::WeightInfo::set_config_with_perbill(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_target_queue_utilization( + origin: OriginFor, + new: Perbill, + ) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_target_queue_utilization = new; + }) + } + /// Set the on demand (parathreads) ttl in the claimqueue. + #[pallet::call_index(51)] + #[pallet::weight(( + T::WeightInfo::set_config_with_block_number(), + DispatchClass::Operational + ))] + pub fn set_on_demand_ttl(origin: OriginFor, new: BlockNumberFor) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_ttl = new; + }) + } } #[pallet::hooks] diff --git a/runtime/parachains/src/configuration/benchmarking.rs b/runtime/parachains/src/configuration/benchmarking.rs index ef8fafd91c96..d9d11ab56e49 100644 --- a/runtime/parachains/src/configuration/benchmarking.rs +++ b/runtime/parachains/src/configuration/benchmarking.rs @@ -47,6 +47,8 @@ benchmarks! 
{ ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Approval, 12_000), ][..])) + set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/runtime/parachains/src/configuration/migration.rs b/runtime/parachains/src/configuration/migration.rs index ae035abac505..4499b116462b 100644 --- a/runtime/parachains/src/configuration/migration.rs +++ b/runtime/parachains/src/configuration/migration.rs @@ -18,3 +18,4 @@ pub mod v6; pub mod v7; +pub mod v8; diff --git a/runtime/parachains/src/configuration/migration/v7.rs b/runtime/parachains/src/configuration/migration/v7.rs index 78a7cf9e4dc0..113651381207 100644 --- a/runtime/parachains/src/configuration/migration/v7.rs +++ b/runtime/parachains/src/configuration/migration/v7.rs @@ -23,13 +23,106 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::SessionIndex; +use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use super::v6::V6HostConfiguration; -type V7HostConfiguration = configuration::HostConfiguration; + +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, Clone)] +pub struct V7HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_max_parathread_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_max_parathread_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub parathread_cores: u32, + pub parathread_retries: u32, + pub group_rotation_frequency: BlockNumber, + pub chain_availability_period: BlockNumber, + pub thread_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, +} + +impl> Default for V7HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + chain_availability_period: 1u32.into(), + thread_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), 
+ parathread_cores: Default::default(), + parathread_retries: Default::default(), + scheduling_lookahead: Default::default(), + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_max_parathread_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_parathread_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + } + } +} mod v6 { use super::*; diff --git a/runtime/parachains/src/configuration/migration/v8.rs b/runtime/parachains/src/configuration/migration/v8.rs new file mode 100644 index 000000000000..7f7cc1cdefcd --- /dev/null +++ b/runtime/parachains/src/configuration/migration/v8.rs @@ -0,0 +1,319 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. 
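The v8 migration module that follows applies the same pattern as the earlier configuration migrations: decode the previous `HostConfiguration` layout, carry renamed fields across (`parathread_*` becomes `on_demand_*`, `chain_availability_period` becomes `paras_availability_period`), and seed the brand-new on-demand fields with the hard-coded defaults used by the `translate` closure further down. A minimal, dependency-free sketch of that translation step, trimmed to a handful of fields, with `Percent(u32)` standing in for `Perbill` and the local struct names chosen purely for illustration:

// Local stand-ins for the pallet types; field names mirror the patch.
#[derive(Debug, Clone, Default)]
struct V7Config {
    parathread_cores: u32,
    parathread_retries: u32,
    chain_availability_period: u32,
    group_rotation_frequency: u32,
}

// Stand-in for `sp_runtime::Perbill`, kept as a plain percentage.
#[derive(Debug, Clone)]
struct Percent(u32);

#[derive(Debug, Clone)]
struct V8Config {
    on_demand_cores: u32,           // renamed from `parathread_cores`
    on_demand_retries: u32,         // renamed from `parathread_retries`
    paras_availability_period: u32, // replaces the chain/thread availability periods
    group_rotation_frequency: u32,  // carried over unchanged
    // New on-demand fields receive the defaults hard-coded by the migration.
    on_demand_queue_max_size: u32,
    on_demand_base_fee: u128,
    on_demand_fee_variability: Percent,
    on_demand_target_queue_utilization: Percent,
    on_demand_ttl: u32,
}

fn translate(pre: V7Config) -> V8Config {
    V8Config {
        on_demand_cores: pre.parathread_cores,
        on_demand_retries: pre.parathread_retries,
        paras_availability_period: pre.chain_availability_period,
        group_rotation_frequency: pre.group_rotation_frequency,
        on_demand_queue_max_size: 10_000,
        on_demand_base_fee: 10_000_000,
        on_demand_fee_variability: Percent(3),
        on_demand_target_queue_utilization: Percent(25),
        on_demand_ttl: 5,
    }
}

fn main() {
    let migrated = translate(V7Config::default());
    println!("{migrated:?}");
}

The real module below additionally rewrites the pending-configuration list with the same closure, and the surrounding `MigrateToV8` adapter bumps the pallet storage version from 7 to 8.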
+ +use crate::configuration::{self, Config, Pallet}; +use frame_support::{ + pallet_prelude::*, + traits::{Defensive, StorageVersion}, + weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::SessionIndex; +use sp_runtime::Perbill; +use sp_std::vec::Vec; + +use frame_support::traits::OnRuntimeUpgrade; + +use super::v7::V7HostConfiguration; +type V8HostConfiguration = configuration::HostConfiguration; + +mod v7 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V7HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V7HostConfiguration>)>, + OptionQuery, + >; +} + +mod v8 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V8HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V8HostConfiguration>)>, + OptionQuery, + >; +} + +pub struct MigrateToV8(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for MigrateToV8 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV8"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV8 started"); + if StorageVersion::get::>() == 7 { + let weight_consumed = migrate_to_v8::(); + + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV8 executed successfully"); + StorageVersion::new(8).put::>(); + + weight_consumed + } else { + log::warn!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV8 should be removed."); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV8"); + ensure!( + StorageVersion::get::>() >= 8, + "Storage version should be >= 8 after the migration" + ); + + Ok(()) + } +} + +fn migrate_to_v8() -> Weight { + // Unusual formatting is justified: + // - make it easier to verify that fields assign what they supposed to assign. + // - this code is transient and will be removed after all migrations are done. + // - this code is important enough to optimize for legibility sacrificing consistency. 
+ #[rustfmt::skip] + let translate = + |pre: V7HostConfiguration>| -> + V8HostConfiguration> + { + V8HostConfiguration { +max_code_size : pre.max_code_size, +max_head_data_size : pre.max_head_data_size, +max_upward_queue_count : pre.max_upward_queue_count, +max_upward_queue_size : pre.max_upward_queue_size, +max_upward_message_size : pre.max_upward_message_size, +max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, +hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, +validation_upgrade_cooldown : pre.validation_upgrade_cooldown, +validation_upgrade_delay : pre.validation_upgrade_delay, +max_pov_size : pre.max_pov_size, +max_downward_message_size : pre.max_downward_message_size, +hrmp_sender_deposit : pre.hrmp_sender_deposit, +hrmp_recipient_deposit : pre.hrmp_recipient_deposit, +hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, +hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, +hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, +hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, +hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, +code_retention_period : pre.code_retention_period, +on_demand_cores : pre.parathread_cores, +on_demand_retries : pre.parathread_retries, +group_rotation_frequency : pre.group_rotation_frequency, +paras_availability_period : pre.chain_availability_period, +scheduling_lookahead : pre.scheduling_lookahead, +max_validators_per_core : pre.max_validators_per_core, +max_validators : pre.max_validators, +dispute_period : pre.dispute_period, +dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, +no_show_slots : pre.no_show_slots, +n_delay_tranches : pre.n_delay_tranches, +zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, +needed_approvals : pre.needed_approvals, +relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, +pvf_voting_ttl : pre.pvf_voting_ttl, +minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, +async_backing_params : pre.async_backing_params, +executor_params : pre.executor_params, +on_demand_queue_max_size : 10_000u32, +on_demand_base_fee : 10_000_000u128, +on_demand_fee_variability : Perbill::from_percent(3), +on_demand_target_queue_utilization : Perbill::from_percent(25), +on_demand_ttl : 5u32.into(), + } + }; + + let v7 = v7::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v8 = translate(v7); + v8::ActiveConfig::::set(Some(v8)); + + // Allowed to be empty. + let pending_v7 = v7::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v8 = Vec::new(); + + for (session, v7) in pending_v7.into_iter() { + let v8 = translate(v7); + pending_v8.push((session, v8)); + } + v8::PendingConfigs::::set(Some(pending_v8.clone())); + + let num_configs = (pending_v8.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Test}; + + #[test] + fn v8_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. 
blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![" + 0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000000000000000001027000080b2e60e80c3c9018096980000000000000000000000000005000000140000000400000001000000010100000000060000006400000002000000190000000000000002000000020000000200000005000000" + ]; + + let v8 = + V8HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v8.max_code_size, 3_145_728); + assert_eq!(v8.validation_upgrade_cooldown, 2); + assert_eq!(v8.max_pov_size, 5_242_880); + assert_eq!(v8.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v8.n_delay_tranches, 25); + assert_eq!(v8.minimum_validation_upgrade_delay, 5); + assert_eq!(v8.group_rotation_frequency, 20); + assert_eq!(v8.on_demand_cores, 0); + assert_eq!(v8.on_demand_base_fee, 10_000_000); + } + + #[test] + fn test_migrate_to_v8() { + // Host configuration has lots of fields. However, in this migration we only remove one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. + let v7 = V7HostConfiguration:: { + needed_approvals: 69, + thread_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + chain_availability_period: 33, + minimum_validation_upgrade_delay: 20, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v7.clone())); + pending_configs.push((300, v7.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v6 version in the state. 
+ v7::ActiveConfig::::set(Some(v7)); + v7::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v8::(); + + let v8 = v8::ActiveConfig::::get().unwrap(); + let mut configs_to_check = v8::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v8.clone())); + + for (_, v7) in configs_to_check { + #[rustfmt::skip] + { + assert_eq!(v7.max_code_size , v8.max_code_size); + assert_eq!(v7.max_head_data_size , v8.max_head_data_size); + assert_eq!(v7.max_upward_queue_count , v8.max_upward_queue_count); + assert_eq!(v7.max_upward_queue_size , v8.max_upward_queue_size); + assert_eq!(v7.max_upward_message_size , v8.max_upward_message_size); + assert_eq!(v7.max_upward_message_num_per_candidate , v8.max_upward_message_num_per_candidate); + assert_eq!(v7.hrmp_max_message_num_per_candidate , v8.hrmp_max_message_num_per_candidate); + assert_eq!(v7.validation_upgrade_cooldown , v8.validation_upgrade_cooldown); + assert_eq!(v7.validation_upgrade_delay , v8.validation_upgrade_delay); + assert_eq!(v7.max_pov_size , v8.max_pov_size); + assert_eq!(v7.max_downward_message_size , v8.max_downward_message_size); + assert_eq!(v7.hrmp_max_parachain_outbound_channels , v8.hrmp_max_parachain_outbound_channels); + assert_eq!(v7.hrmp_sender_deposit , v8.hrmp_sender_deposit); + assert_eq!(v7.hrmp_recipient_deposit , v8.hrmp_recipient_deposit); + assert_eq!(v7.hrmp_channel_max_capacity , v8.hrmp_channel_max_capacity); + assert_eq!(v7.hrmp_channel_max_total_size , v8.hrmp_channel_max_total_size); + assert_eq!(v7.hrmp_max_parachain_inbound_channels , v8.hrmp_max_parachain_inbound_channels); + assert_eq!(v7.hrmp_channel_max_message_size , v8.hrmp_channel_max_message_size); + assert_eq!(v7.code_retention_period , v8.code_retention_period); + assert_eq!(v7.on_demand_cores , v8.on_demand_cores); + assert_eq!(v7.on_demand_retries , v8.on_demand_retries); + assert_eq!(v7.group_rotation_frequency , v8.group_rotation_frequency); + assert_eq!(v7.paras_availability_period , v8.paras_availability_period); + assert_eq!(v7.scheduling_lookahead , v8.scheduling_lookahead); + assert_eq!(v7.max_validators_per_core , v8.max_validators_per_core); + assert_eq!(v7.max_validators , v8.max_validators); + assert_eq!(v7.dispute_period , v8.dispute_period); + assert_eq!(v7.no_show_slots , v8.no_show_slots); + assert_eq!(v7.n_delay_tranches , v8.n_delay_tranches); + assert_eq!(v7.zeroth_delay_tranche_width , v8.zeroth_delay_tranche_width); + assert_eq!(v7.needed_approvals , v8.needed_approvals); + assert_eq!(v7.relay_vrf_modulo_samples , v8.relay_vrf_modulo_samples); + assert_eq!(v7.pvf_voting_ttl , v8.pvf_voting_ttl); + assert_eq!(v7.minimum_validation_upgrade_delay , v8.minimum_validation_upgrade_delay); + assert_eq!(v7.async_backing_params.allowed_ancestry_len, v8.async_backing_params.allowed_ancestry_len); + assert_eq!(v7.async_backing_params.max_candidate_depth , v8.async_backing_params.max_candidate_depth); + assert_eq!(v7.executor_params , v8.executor_params); + }; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression. + } + }); + } + + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. + #[test] + fn test_migrate_to_v8_no_pending() { + let v7 = V7HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v6 version in the state. + v7::ActiveConfig::::set(Some(v7)); + // Ensure there're no pending configs. + v7::PendingConfigs::::set(None); + + // Shouldn't fail. 
+ migrate_to_v8::(); + }); + } +} diff --git a/runtime/parachains/src/configuration/tests.rs b/runtime/parachains/src/configuration/tests.rs index 0c2b5a779cb5..b2a81894a939 100644 --- a/runtime/parachains/src/configuration/tests.rs +++ b/runtime/parachains/src/configuration/tests.rs @@ -216,11 +216,7 @@ fn invariants() { ); assert_err!( - Configuration::set_chain_availability_period(RuntimeOrigin::root(), 0), - Error::::InvalidNewValue - ); - assert_err!( - Configuration::set_thread_availability_period(RuntimeOrigin::root(), 0), + Configuration::set_paras_availability_period(RuntimeOrigin::root(), 0), Error::::InvalidNewValue ); assert_err!( @@ -229,17 +225,12 @@ fn invariants() { ); ActiveConfig::::put(HostConfiguration { - chain_availability_period: 10, - thread_availability_period: 8, + paras_availability_period: 10, minimum_validation_upgrade_delay: 11, ..Default::default() }); assert_err!( - Configuration::set_chain_availability_period(RuntimeOrigin::root(), 12), - Error::::InvalidNewValue - ); - assert_err!( - Configuration::set_thread_availability_period(RuntimeOrigin::root(), 12), + Configuration::set_paras_availability_period(RuntimeOrigin::root(), 12), Error::::InvalidNewValue ); assert_err!( @@ -291,11 +282,10 @@ fn setting_pending_config_members() { max_code_size: 100_000, max_pov_size: 1024, max_head_data_size: 1_000, - parathread_cores: 2, - parathread_retries: 5, + on_demand_cores: 2, + on_demand_retries: 5, group_rotation_frequency: 20, - chain_availability_period: 10, - thread_availability_period: 8, + paras_availability_period: 10, scheduling_lookahead: 3, max_validators_per_core: None, max_validators: None, @@ -316,14 +306,17 @@ fn setting_pending_config_members() { hrmp_channel_max_capacity: 3921, hrmp_channel_max_total_size: 7687, hrmp_max_parachain_inbound_channels: 37, - hrmp_max_parathread_inbound_channels: 19, hrmp_channel_max_message_size: 8192, hrmp_max_parachain_outbound_channels: 10, - hrmp_max_parathread_outbound_channels: 20, hrmp_max_message_num_per_candidate: 20, pvf_voting_ttl: 3, minimum_validation_upgrade_delay: 20, executor_params: Default::default(), + on_demand_queue_max_size: 10_000u32, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32, }; Configuration::set_validation_upgrade_cooldown( @@ -345,9 +338,9 @@ fn setting_pending_config_members() { Configuration::set_max_pov_size(RuntimeOrigin::root(), new_config.max_pov_size).unwrap(); Configuration::set_max_head_data_size(RuntimeOrigin::root(), new_config.max_head_data_size) .unwrap(); - Configuration::set_parathread_cores(RuntimeOrigin::root(), new_config.parathread_cores) + Configuration::set_on_demand_cores(RuntimeOrigin::root(), new_config.on_demand_cores) .unwrap(); - Configuration::set_parathread_retries(RuntimeOrigin::root(), new_config.parathread_retries) + Configuration::set_on_demand_retries(RuntimeOrigin::root(), new_config.on_demand_retries) .unwrap(); Configuration::set_group_rotation_frequency( RuntimeOrigin::root(), @@ -361,14 +354,9 @@ fn setting_pending_config_members() { new_config.minimum_validation_upgrade_delay, ) .unwrap(); - Configuration::set_chain_availability_period( + Configuration::set_paras_availability_period( RuntimeOrigin::root(), - new_config.chain_availability_period, - ) - .unwrap(); - Configuration::set_thread_availability_period( - RuntimeOrigin::root(), - new_config.thread_availability_period, + new_config.paras_availability_period, ) 
.unwrap(); Configuration::set_scheduling_lookahead( @@ -462,11 +450,6 @@ fn setting_pending_config_members() { new_config.hrmp_max_parachain_inbound_channels, ) .unwrap(); - Configuration::set_hrmp_max_parathread_inbound_channels( - RuntimeOrigin::root(), - new_config.hrmp_max_parathread_inbound_channels, - ) - .unwrap(); Configuration::set_hrmp_channel_max_message_size( RuntimeOrigin::root(), new_config.hrmp_channel_max_message_size, @@ -477,11 +460,6 @@ fn setting_pending_config_members() { new_config.hrmp_max_parachain_outbound_channels, ) .unwrap(); - Configuration::set_hrmp_max_parathread_outbound_channels( - RuntimeOrigin::root(), - new_config.hrmp_max_parathread_outbound_channels, - ) - .unwrap(); Configuration::set_hrmp_max_message_num_per_candidate( RuntimeOrigin::root(), new_config.hrmp_max_message_num_per_candidate, diff --git a/runtime/parachains/src/dmp.rs b/runtime/parachains/src/dmp.rs index b88475c980ef..e4a7d5e17465 100644 --- a/runtime/parachains/src/dmp.rs +++ b/runtime/parachains/src/dmp.rs @@ -94,8 +94,9 @@ impl fmt::Debug for ProcessedDownwardMessagesAcceptanceErr { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use ProcessedDownwardMessagesAcceptanceErr::*; match *self { - AdvancementRule => - write!(fmt, "DMQ is not empty, but processed_downward_messages is 0",), + AdvancementRule => { + write!(fmt, "DMQ is not empty, but processed_downward_messages is 0",) + }, Underflow { processed_downward_messages, dmq_length } => write!( fmt, "processed_downward_messages = {}, but dmq_length is only {}", diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index 1be2fe57b1df..27f9fdab7684 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -1184,11 +1184,7 @@ impl Pallet { let egress_cnt = HrmpEgressChannelsIndex::::decode_len(&origin).unwrap_or(0) as u32; let open_req_cnt = HrmpOpenChannelRequestCount::::get(&origin); - let channel_num_limit = if >::is_parathread(origin) { - config.hrmp_max_parathread_outbound_channels - } else { - config.hrmp_max_parachain_outbound_channels - }; + let channel_num_limit = config.hrmp_max_parachain_outbound_channels; ensure!( egress_cnt + open_req_cnt < channel_num_limit, Error::::OpenHrmpChannelLimitExceeded, @@ -1254,11 +1250,7 @@ impl Pallet { // check if by accepting this open channel request, this parachain would exceed the // number of inbound channels. 
let config = >::config(); - let channel_num_limit = if >::is_parathread(origin) { - config.hrmp_max_parathread_inbound_channels - } else { - config.hrmp_max_parachain_inbound_channels - }; + let channel_num_limit = config.hrmp_max_parachain_inbound_channels; let ingress_cnt = HrmpIngressChannelsIndex::::decode_len(&origin).unwrap_or(0) as u32; let accepted_cnt = HrmpAcceptedChannelRequestCount::::get(&origin); ensure!( diff --git a/runtime/parachains/src/hrmp/tests.rs b/runtime/parachains/src/hrmp/tests.rs index 8b9fd7136a13..8cfaf48d10ef 100644 --- a/runtime/parachains/src/hrmp/tests.rs +++ b/runtime/parachains/src/hrmp/tests.rs @@ -69,10 +69,8 @@ pub(crate) fn run_to_block(to: BlockNumber, new_session: Option pub(super) struct GenesisConfigBuilder { hrmp_channel_max_capacity: u32, hrmp_channel_max_message_size: u32, - hrmp_max_parathread_outbound_channels: u32, - hrmp_max_parachain_outbound_channels: u32, - hrmp_max_parathread_inbound_channels: u32, - hrmp_max_parachain_inbound_channels: u32, + hrmp_max_paras_outbound_channels: u32, + hrmp_max_paras_inbound_channels: u32, hrmp_max_message_num_per_candidate: u32, hrmp_channel_max_total_size: u32, hrmp_sender_deposit: Balance, @@ -84,10 +82,8 @@ impl Default for GenesisConfigBuilder { Self { hrmp_channel_max_capacity: 2, hrmp_channel_max_message_size: 8, - hrmp_max_parathread_outbound_channels: 1, - hrmp_max_parachain_outbound_channels: 2, - hrmp_max_parathread_inbound_channels: 1, - hrmp_max_parachain_inbound_channels: 2, + hrmp_max_paras_outbound_channels: 2, + hrmp_max_paras_inbound_channels: 2, hrmp_max_message_num_per_candidate: 2, hrmp_channel_max_total_size: 16, hrmp_sender_deposit: 100, @@ -102,10 +98,8 @@ impl GenesisConfigBuilder { let config = &mut genesis.configuration.config; config.hrmp_channel_max_capacity = self.hrmp_channel_max_capacity; config.hrmp_channel_max_message_size = self.hrmp_channel_max_message_size; - config.hrmp_max_parathread_outbound_channels = self.hrmp_max_parathread_outbound_channels; - config.hrmp_max_parachain_outbound_channels = self.hrmp_max_parachain_outbound_channels; - config.hrmp_max_parathread_inbound_channels = self.hrmp_max_parathread_inbound_channels; - config.hrmp_max_parachain_inbound_channels = self.hrmp_max_parachain_inbound_channels; + config.hrmp_max_parachain_outbound_channels = self.hrmp_max_paras_outbound_channels; + config.hrmp_max_parachain_inbound_channels = self.hrmp_max_paras_inbound_channels; config.hrmp_max_message_num_per_candidate = self.hrmp_max_message_num_per_candidate; config.hrmp_channel_max_total_size = self.hrmp_channel_max_total_size; config.hrmp_sender_deposit = self.hrmp_sender_deposit; diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index f4ef3b95065e..9786b87f1162 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -23,7 +23,7 @@ use crate::{ configuration::{self, HostConfiguration}, disputes, dmp, hrmp, paras, - scheduler::CoreAssignment, + scheduler::common::CoreAssignment, shared, }; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; @@ -178,7 +178,7 @@ pub trait RewardValidators { #[derive(Encode, Decode, PartialEq, TypeInfo)] #[cfg_attr(test, derive(Debug))] pub(crate) struct ProcessedCandidates { - pub(crate) core_indices: Vec, + pub(crate) core_indices: Vec<(CoreIndex, ParaId)>, pub(crate) candidate_receipt_with_backing_validator_indices: Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, } @@ -322,8 +322,6 @@ pub mod pallet { 
UnscheduledCandidate, /// Candidate scheduled despite pending candidate already existing for the para. CandidateScheduledBeforeParaFree, - /// Candidate included with the wrong collator. - WrongCollator, /// Scheduled cores out of order. ScheduledOutOfOrder, /// Head data exceeds the configured maximum. @@ -599,7 +597,7 @@ impl Pallet { pub(crate) fn process_candidates( parent_storage_root: T::Hash, candidates: Vec>, - scheduled: Vec, + scheduled: Vec>>, group_validators: GV, ) -> Result, DispatchError> where @@ -630,15 +628,16 @@ impl Pallet { let mut core_indices_and_backers = Vec::with_capacity(candidates.len()); let mut last_core = None; - let mut check_assignment_in_order = |assignment: &CoreAssignment| -> DispatchResult { - ensure!( - last_core.map_or(true, |core| assignment.core > core), - Error::::ScheduledOutOfOrder, - ); + let mut check_assignment_in_order = + |assignment: &CoreAssignment>| -> DispatchResult { + ensure!( + last_core.map_or(true, |core| assignment.core > core), + Error::::ScheduledOutOfOrder, + ); - last_core = Some(assignment.core); - Ok(()) - }; + last_core = Some(assignment.core); + Ok(()) + }; let signing_context = SigningContext { parent_hash, session_index: shared::Pallet::::session_index() }; @@ -680,17 +679,10 @@ impl Pallet { let para_id = backed_candidate.descriptor().para_id; let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; - for (i, assignment) in scheduled[skip..].iter().enumerate() { - check_assignment_in_order(assignment)?; - - if para_id == assignment.para_id { - if let Some(required_collator) = assignment.required_collator() { - ensure!( - required_collator == &backed_candidate.descriptor().collator, - Error::::WrongCollator, - ); - } + for (i, core_assignment) in scheduled[skip..].iter().enumerate() { + check_assignment_in_order(core_assignment)?; + if para_id == core_assignment.paras_entry.para_id() { ensure!( >::get(¶_id).is_none() && >::get(¶_id).is_none(), @@ -700,7 +692,7 @@ impl Pallet { // account for already skipped, and then skip this one. skip = i + skip + 1; - let group_vals = group_validators(assignment.group_idx) + let group_vals = group_validators(core_assignment.group_idx) .ok_or_else(|| Error::::InvalidGroupIndex)?; // check the signatures in the backing and that it is a majority. 
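The reworked `process_candidates` keeps the pre-existing invariant that scheduled assignments are consumed in strictly increasing core order; the difference is that the `ParaId` is now read from the assignment's `ParasEntry` instead of a dedicated `para_id` field, and the required-collator check is gone. A self-contained sketch of that ordering check, with trimmed-down local types standing in for the runtime's `CoreIndex` and `CoreAssignment`:

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct CoreIndex(u32);

#[derive(Clone, Copy, Debug)]
struct ParaId(u32);

// Trimmed-down stand-in for the runtime's `CoreAssignment<ParasEntry<_>>`.
struct CoreAssignment {
    core: CoreIndex,
    para_id: ParaId, // in the runtime this comes from `paras_entry.para_id()`
}

/// Fail if the scheduled core indices do not strictly increase, mirroring
/// the `check_assignment_in_order` closure.
fn check_in_order(scheduled: &[CoreAssignment]) -> Result<(), &'static str> {
    let mut last_core: Option<CoreIndex> = None;
    for assignment in scheduled {
        if !last_core.map_or(true, |core| assignment.core > core) {
            return Err("ScheduledOutOfOrder");
        }
        last_core = Some(assignment.core);
    }
    Ok(())
}

fn main() {
    let in_order = [
        CoreAssignment { core: CoreIndex(0), para_id: ParaId(1) },
        CoreAssignment { core: CoreIndex(2), para_id: ParaId(2) },
    ];
    assert!(check_in_order(&in_order).is_ok());

    let out_of_order = [
        CoreAssignment { core: CoreIndex(1), para_id: ParaId(1) },
        CoreAssignment { core: CoreIndex(1), para_id: ParaId(2) },
    ];
    assert_eq!(check_in_order(&out_of_order), Err("ScheduledOutOfOrder"));
}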
@@ -752,9 +744,9 @@ impl Pallet { } core_indices_and_backers.push(( - assignment.core, + (core_assignment.core, core_assignment.paras_entry.para_id()), backers, - assignment.group_idx, + core_assignment.group_idx, )); continue 'next_backed_candidate } @@ -788,7 +780,7 @@ impl Pallet { Self::deposit_event(Event::::CandidateBacked( candidate.candidate.to_plain(), candidate.candidate.commitments.head_data.clone(), - core, + core.0, group, )); @@ -800,7 +792,7 @@ impl Pallet { >::insert( ¶_id, CandidatePendingAvailability { - core, + core: core.0, hash: candidate_hash, descriptor, availability_votes, diff --git a/runtime/parachains/src/inclusion/tests.rs b/runtime/parachains/src/inclusion/tests.rs index 3b4d7a7df357..70179782a53a 100644 --- a/runtime/parachains/src/inclusion/tests.rs +++ b/runtime/parachains/src/inclusion/tests.rs @@ -24,7 +24,6 @@ use crate::{ }, paras::{ParaGenesisArgs, ParaKind}, paras_inherent::DisputedBitfield, - scheduler::AssignmentKind, }; use primitives::{SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields}; @@ -33,6 +32,7 @@ use frame_support::assert_noop; use keyring::Sr25519Keyring; use parity_scale_codec::DecodeAll; use primitives::{ + v5::{Assignment, ParasEntry}, BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorId, CompactStatement as Statement, Hash, SignedAvailabilityBitfield, SignedStatement, ValidationCode, ValidatorId, ValidityAttestation, PARACHAIN_KEY_TYPE_ID, @@ -44,7 +44,7 @@ use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_co fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); - config.parathread_cores = 1; + config.on_demand_cores = 1; config.max_code_size = 0b100000; config.max_head_data_size = 0b100000; config @@ -201,7 +201,7 @@ pub(crate) fn run_to_block( } pub(crate) fn expected_bits() -> usize { - Paras::parachains().len() + Configuration::config().parathread_cores as usize + Paras::parachains().len() + Configuration::config().on_demand_cores as usize } fn default_bitfield() -> AvailabilityBitfield { @@ -877,26 +877,23 @@ fn candidate_checks() { .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; + let entry_ttl = 10_000; let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); - let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), group_idx: GroupIndex::from(0), }; let chain_b_assignment = CoreAssignment { core: CoreIndex::from(1), - para_id: chain_b, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry::new(Assignment::new(chain_b), entry_ttl), group_idx: GroupIndex::from(1), }; let thread_a_assignment = CoreAssignment { core: CoreIndex::from(2), - para_id: thread_a, - kind: AssignmentKind::Parathread(thread_collator.clone(), 0), + paras_entry: ParasEntry::new(Assignment::new(thread_a), entry_ttl), group_idx: GroupIndex::from(2), }; @@ -1056,45 +1053,6 @@ fn candidate_checks() { ); } - // candidate has wrong collator. 
- { - let mut candidate = TestCandidateBuilder { - para_id: thread_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - assert!(CollatorId::from(Sr25519Keyring::One.public()) != thread_collator); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(2)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - ); - - assert_noop!( - ParaInclusion::process_candidates( - Default::default(), - vec![backed], - vec![ - chain_a_assignment.clone(), - chain_b_assignment.clone(), - thread_a_assignment.clone(), - ], - &group_validators, - ), - Error::::WrongCollator, - ); - } - // candidate not well-signed by collator. { let mut candidate = TestCandidateBuilder { @@ -1424,26 +1382,23 @@ fn backing_works() { .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) }; - let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); + let entry_ttl = 10_000; let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), group_idx: GroupIndex::from(0), }; let chain_b_assignment = CoreAssignment { core: CoreIndex::from(1), - para_id: chain_b, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry::new(Assignment::new(chain_b), entry_ttl), group_idx: GroupIndex::from(1), }; let thread_a_assignment = CoreAssignment { core: CoreIndex::from(2), - para_id: thread_a, - kind: AssignmentKind::Parathread(thread_collator.clone(), 0), + paras_entry: ParasEntry::new(Assignment::new(thread_a), entry_ttl), group_idx: GroupIndex::from(2), }; @@ -1507,7 +1462,7 @@ fn backing_works() { BackingKind::Threshold, ); - let backed_candidates = vec![backed_a, backed_b, backed_c]; + let backed_candidates = vec![backed_a.clone(), backed_b.clone(), backed_c]; let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates @@ -1544,7 +1499,11 @@ fn backing_works() { assert_eq!( occupied_cores, - vec![CoreIndex::from(0), CoreIndex::from(1), CoreIndex::from(2)] + vec![ + (CoreIndex::from(0), chain_a), + (CoreIndex::from(1), chain_b), + (CoreIndex::from(2), thread_a) + ] ); // Transform the votes into the setup we expect @@ -1702,10 +1661,11 @@ fn can_include_candidate_with_ok_code_upgrade() { .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) }; + let entry_ttl = 10_000; + let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), group_idx: GroupIndex::from(0), }; @@ -1739,7 +1699,7 @@ fn can_include_candidate_with_ok_code_upgrade() { ) .expect("candidates scheduled, in order, and backed"); - assert_eq!(occupied_cores, vec![CoreIndex::from(0)]); + assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]); let backers = { let num_backers = minimum_backing_votes(group_validators(GroupIndex(0)).unwrap().len()); @@ -1958,8 +1918,11 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a 
}, + availability_timeouts: 0, + ttl: 5, + }, group_idx: GroupIndex::from(0), }; @@ -1993,7 +1956,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ) .expect("candidates scheduled, in order, and backed"); - assert_eq!(occupied_cores, vec![CoreIndex::from(0)]); + assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]); // Run a couple of blocks before the inclusion. run_to_block(7, |_| None); diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index e006c38e6dec..b4f8721be518 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -240,6 +240,9 @@ impl Pallet { buf }; + // inform about upcoming new session + scheduler::Pallet::::pre_new_session(); + let configuration::SessionChangeOutcome { prev_config, new_config } = configuration::Pallet::::initializer_on_new_session(&session_index); let new_config = new_config.unwrap_or_else(|| prev_config.clone()); diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 43c5c6441ad9..a9348ebd2f41 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -23,6 +23,9 @@ #![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "256")] #![cfg_attr(not(feature = "std"), no_std)] +pub mod assigner; +pub mod assigner_on_demand; +pub mod assigner_parachains; pub mod configuration; pub mod disputes; pub mod dmp; diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index bab896c419f6..f978b6c3360e 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -17,7 +17,7 @@ //! Mocks for all the traits. use crate::{ - configuration, disputes, dmp, hrmp, + assigner, assigner_on_demand, assigner_parachains, configuration, disputes, dmp, hrmp, inclusion::{self, AggregateMessageOrigin, UmpQueueId}, initializer, origin, paras, paras::ParaKind, @@ -43,7 +43,7 @@ use sp_io::TestExternalities; use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, IdentityLookup}, transaction_validity::TransactionPriority, - BuildStorage, Perbill, Permill, + BuildStorage, FixedU128, Perbill, Permill, }; use std::{cell::RefCell, collections::HashMap}; @@ -62,6 +62,9 @@ frame_support::construct_runtime!( ParaInclusion: inclusion, ParaInherent: paras_inherent, Scheduler: scheduler, + Assigner: assigner, + OnDemandAssigner: assigner_on_demand, + ParachainsAssigner: assigner_parachains, Initializer: initializer, Dmp: dmp, Hrmp: hrmp, @@ -281,7 +284,9 @@ impl crate::disputes::SlashingHandler for Test { fn initializer_on_new_session(_: SessionIndex) {} } -impl crate::scheduler::Config for Test {} +impl crate::scheduler::Config for Test { + type AssignmentProvider = Assigner; +} pub struct TestMessageQueueWeight; impl pallet_message_queue::WeightInfo for TestMessageQueueWeight { @@ -334,6 +339,24 @@ impl pallet_message_queue::Config for Test { type ServiceWeight = MessageQueueServiceWeight; } +impl assigner::Config for Test { + type ParachainsAssignmentProvider = ParachainsAssigner; + type OnDemandAssignmentProvider = OnDemandAssigner; +} + +impl assigner_parachains::Config for Test {} + +parameter_types! 
{ + pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); +} + +impl assigner_on_demand::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = crate::assigner_on_demand::TestWeightInfo; +} + impl crate::inclusion::Config for Test { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; diff --git a/runtime/parachains/src/paras/tests.rs b/runtime/parachains/src/paras/tests.rs index a9b51fe2b45e..e2067448b288 100644 --- a/runtime/parachains/src/paras/tests.rs +++ b/runtime/parachains/src/paras/tests.rs @@ -746,8 +746,7 @@ fn full_parachain_cleanup_storage() { minimum_validation_upgrade_delay: 2, // Those are not relevant to this test. However, HostConfiguration is still a // subject for the consistency check. - chain_availability_period: 1, - thread_availability_period: 1, + paras_availability_period: 1, ..Default::default() }, }, diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index da0b972bc92c..0ace3c312269 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -28,7 +28,8 @@ use crate::{ inclusion::CandidateCheckContext, initializer, metrics::METRICS, - scheduler::{self, CoreAssignment, FreedReason}, + scheduler, + scheduler::common::{CoreAssignment, FreedReason}, shared, ParaId, }; use bitvec::prelude::BitVec; @@ -518,7 +519,7 @@ impl Pallet { .map(|(_session, candidate)| candidate) .collect::>(); - let mut freed_disputed: Vec<_> = + let freed_disputed: BTreeMap = >::collect_disputed(¤t_concluded_invalid_disputes) .into_iter() .map(|core| (core, FreedReason::Concluded)) @@ -528,16 +529,10 @@ impl Pallet { // a core index that was freed due to a dispute. // // I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed. - let disputed_bitfield = create_disputed_bitfield( - expected_bits, - freed_disputed.iter().map(|(core_index, _)| core_index), - ); + let disputed_bitfield = create_disputed_bitfield(expected_bits, freed_disputed.keys()); if !freed_disputed.is_empty() { - // unstable sort is fine, because core indices are unique - // i.e. the same candidate can't occupy 2 cores at once. - freed_disputed.sort_unstable_by_key(|pair| pair.0); // sort by core index - >::free_cores(freed_disputed.clone()); + >::update_claimqueue(freed_disputed.clone(), now); } let bitfields = sanitize_bitfields::( @@ -569,10 +564,7 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); - >::clear(); - >::schedule(freed, now); - - let scheduled = >::scheduled(); + let scheduled = >::update_claimqueue(freed, now); let relay_parent_number = now - One::one(); let parent_storage_root = *parent_header.state_root(); @@ -614,7 +606,7 @@ impl Pallet { >::group_validators, )?; // Note which of the scheduled cores were actually occupied by a backed candidate. - >::occupied(&occupied); + >::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect()); set_scrapable_on_chain_backings::( current_session, @@ -908,7 +900,7 @@ fn sanitize_backed_candidates< relay_parent: T::Hash, mut backed_candidates: Vec>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, - scheduled: &[CoreAssignment], + scheduled: &[CoreAssignment>], ) -> Vec> { // Remove any candidates that were concluded invalid. // This does not assume sorting. 
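`sanitize_backed_candidates` now takes the scheduled `CoreAssignment`s directly and, as the next hunk shows, builds a `ParaId -> CoreIndex` map from `paras_entry.para_id()`; backed candidates whose para has no scheduled core are dropped. A dependency-free sketch of that filtering step (local types are simplified stand-ins; the invalid-dispute predicate is omitted):

use std::collections::BTreeMap;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct ParaId(u32);

#[derive(Clone, Copy, Debug)]
struct CoreIndex(u32);

struct ScheduledCore {
    para_id: ParaId, // in the runtime: `core_assignment.paras_entry.para_id()`
    core: CoreIndex,
}

struct BackedCandidate {
    para_id: ParaId,
}

/// Keep only the candidates whose para has a scheduled core, mirroring the
/// `scheduled_paras_to_core_idx` lookup; disputed candidates are ignored here.
fn sanitize(candidates: Vec<BackedCandidate>, scheduled: &[ScheduledCore]) -> Vec<BackedCandidate> {
    let scheduled_paras_to_core_idx: BTreeMap<ParaId, CoreIndex> =
        scheduled.iter().map(|s| (s.para_id, s.core)).collect();
    candidates
        .into_iter()
        .filter(|c| scheduled_paras_to_core_idx.contains_key(&c.para_id))
        .collect()
}

fn main() {
    let scheduled = [ScheduledCore { para_id: ParaId(1), core: CoreIndex(0) }];
    let kept = sanitize(
        vec![BackedCandidate { para_id: ParaId(1) }, BackedCandidate { para_id: ParaId(2) }],
        &scheduled,
    );
    assert_eq!(kept.len(), 1);
}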
@@ -918,7 +910,7 @@ fn sanitize_backed_candidates< let scheduled_paras_to_core_idx = scheduled .into_iter() - .map(|core_assignment| (core_assignment.para_id, core_assignment.core)) + .map(|core_assignment| (core_assignment.paras_entry.para_id(), core_assignment.core)) .collect::>(); // Assure the backed candidate's `ParaId`'s core is free. diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index 4de12bcc91b7..4636200b762b 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -72,7 +72,10 @@ mod enter { // freed via becoming fully available, the backed candidates will not be filtered out in // `create_inherent` and will not cause `enter` to early. fn include_backed_candidates() { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let config = MockGenesisConfig::default(); + assert!(config.configuration.config.scheduling_lookahead > 0); + + new_test_ext(config).execute_with(|| { let dispute_statements = BTreeMap::new(); let mut backed_and_concluding = BTreeMap::new(); @@ -106,7 +109,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert_eq!(>::scheduled(), vec![]); + assert!(>::claimqueue_is_empty()); // Nothing is filtered out (including the backed candidates.) assert_eq!( @@ -253,7 +256,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert_eq!(>::scheduled(), vec![]); + assert!(>::claimqueue_is_empty()); let multi_dispute_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -322,7 +325,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert_eq!(>::scheduled(), vec![]); + assert!(>::claimqueue_is_empty()); let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -391,7 +394,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert_eq!(>::scheduled(), vec![]); + assert!(>::claimqueue_is_empty()); // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = @@ -475,7 +478,7 @@ mod enter { .unwrap(); // The current schedule is empty prior to calling `create_inherent_enter`. - assert_eq!(>::scheduled(), vec![]); + assert!(>::claimqueue_is_empty()); // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = @@ -601,7 +604,10 @@ mod enter { #[test] // Ensure that when a block is over weight due to disputes and bitfields, we filter. 
fn limit_candidates_over_weight_1() { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let config = MockGenesisConfig::default(); + assert!(config.configuration.config.scheduling_lookahead > 0); + + new_test_ext(config).execute_with(|| { // Create the inherent data for this block let mut dispute_statements = BTreeMap::new(); // Control the number of statements per dispute to ensure we have enough space @@ -953,7 +959,10 @@ mod sanitizers { use crate::mock::Test; use keyring::Sr25519Keyring; - use primitives::PARACHAIN_KEY_TYPE_ID; + use primitives::{ + v5::{Assignment, ParasEntry}, + PARACHAIN_KEY_TYPE_ID, + }; use sc_keystore::LocalKeystore; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; @@ -1225,19 +1234,22 @@ mod sanitizers { let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + let entry_ttl = 10_000; let scheduled = (0_usize..2) .into_iter() .map(|idx| { + let core_idx = CoreIndex::from(idx as u32); let ca = CoreAssignment { - kind: scheduler::AssignmentKind::Parachain, + paras_entry: ParasEntry::new( + Assignment::new(ParaId::from(1_u32 + idx as u32)), + entry_ttl, + ), group_idx: GroupIndex::from(idx as u32), - para_id: ParaId::from(1_u32 + idx as u32), - core: CoreIndex::from(idx as u32), + core: core_idx, }; ca }) .collect::>(); - let scheduled = &scheduled[..]; let group_validators = |group_index: GroupIndex| { match group_index { @@ -1282,14 +1294,14 @@ mod sanitizers { relay_parent, backed_candidates.clone(), has_concluded_invalid, - scheduled + &scheduled ), backed_candidates ); // nothing is scheduled, so no paraids match, thus all backed candidates are skipped { - let scheduled = &[][..]; + let scheduled = &Vec::new(); assert!(sanitize_backed_candidates::( relay_parent, backed_candidates.clone(), @@ -1306,7 +1318,7 @@ mod sanitizers { relay_parent, backed_candidates.clone(), has_concluded_invalid, - scheduled + &scheduled ) .is_empty()); } @@ -1330,7 +1342,7 @@ mod sanitizers { relay_parent, backed_candidates.clone(), has_concluded_invalid, - scheduled + &scheduled ) .len(), backed_candidates.len() / 2 diff --git a/runtime/parachains/src/runtime_api_impl/v5.rs b/runtime/parachains/src/runtime_api_impl/v5.rs index 4c9c8c911f62..36b93a70a9f2 100644 --- a/runtime/parachains/src/runtime_api_impl/v5.rs +++ b/runtime/parachains/src/runtime_api_impl/v5.rs @@ -27,8 +27,8 @@ use primitives::{ CoreIndex, CoreOccupied, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_runtime::traits::One; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -52,13 +52,8 @@ pub fn validator_groups( /// Implementation for the `availability_cores` function of the runtime API. 
pub fn availability_cores() -> Vec>> { let cores = >::availability_cores(); - let parachains = >::parachains(); let config = >::config(); - let now = >::block_number() + One::one(); - >::clear(); - >::schedule(Vec::new(), now); - let rotation_info = >::group_rotation_info(now); let time_out_at = |backed_in_number, availability_period| { @@ -102,73 +97,39 @@ pub fn availability_cores() -> Vec CoreState::Occupied(match occupied { - CoreOccupied::Parachain => { - let para_id = parachains[i]; - let pending_availability = - >::pending_availability(para_id) - .expect("Occupied core always has pending availability; qed"); - - let backed_in_number = *pending_availability.backed_in_number(); - OccupiedCore { - next_up_on_available: >::next_up_on_available( - CoreIndex(i as u32), - ), - occupied_since: backed_in_number, - time_out_at: time_out_at( - backed_in_number, - config.chain_availability_period, - ), - next_up_on_time_out: >::next_up_on_time_out( - CoreIndex(i as u32), - ), - availability: pending_availability.availability_votes().clone(), - group_responsible: group_responsible_for( - backed_in_number, - pending_availability.core_occupied(), - ), - candidate_hash: pending_availability.candidate_hash(), - candidate_descriptor: pending_availability.candidate_descriptor().clone(), - } - }, - CoreOccupied::Parathread(p) => { - let para_id = p.claim.0; - let pending_availability = - >::pending_availability(para_id) - .expect("Occupied core always has pending availability; qed"); - - let backed_in_number = *pending_availability.backed_in_number(); - OccupiedCore { - next_up_on_available: >::next_up_on_available( - CoreIndex(i as u32), - ), - occupied_since: backed_in_number, - time_out_at: time_out_at( - backed_in_number, - config.thread_availability_period, - ), - next_up_on_time_out: >::next_up_on_time_out( - CoreIndex(i as u32), - ), - availability: pending_availability.availability_votes().clone(), - group_responsible: group_responsible_for( - backed_in_number, - pending_availability.core_occupied(), - ), - candidate_hash: pending_availability.candidate_hash(), - candidate_descriptor: pending_availability.candidate_descriptor().clone(), - } - }, - }), - None => CoreState::Free, + CoreOccupied::Paras(entry) => { + let pending_availability = + >::pending_availability(entry.para_id()) + .expect("Occupied core always has pending availability; qed"); + + let backed_in_number = *pending_availability.backed_in_number(); + CoreState::Occupied(OccupiedCore { + next_up_on_available: >::next_up_on_available(CoreIndex( + i as u32, + )), + occupied_since: backed_in_number, + time_out_at: time_out_at(backed_in_number, config.paras_availability_period), + next_up_on_time_out: >::next_up_on_time_out(CoreIndex( + i as u32, + )), + availability: pending_availability.availability_votes().clone(), + group_responsible: group_responsible_for( + backed_in_number, + pending_availability.core_occupied(), + ), + candidate_hash: pending_availability.candidate_hash(), + candidate_descriptor: pending_availability.candidate_descriptor().clone(), + }) + }, + CoreOccupied::Free => CoreState::Free, }) .collect(); // This will overwrite only `Free` cores if the scheduler module is working as intended. 
- for scheduled in >::scheduled() { - core_states[scheduled.core.0 as usize] = CoreState::Scheduled(ScheduledCore { - para_id: scheduled.para_id, - collator: scheduled.required_collator().map(|c| c.clone()), + for scheduled in >::scheduled_claimqueue(now) { + core_states[scheduled.core.0 as usize] = CoreState::Scheduled(primitives::ScheduledCore { + para_id: scheduled.paras_entry.para_id(), + collator: None, }); } diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 6882834187dc..81a8bfc535e0 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -36,135 +36,46 @@ //! number of groups as availability cores. Validator groups will be assigned to different //! availability cores over time. +use crate::{configuration, initializer::SessionChangeNotification, paras}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - CollatorId, CoreIndex, CoreOccupied, GroupIndex, GroupRotationInfo, Id as ParaId, - ParathreadClaim, ParathreadEntry, ScheduledCore, ValidatorIndex, + v5::ParasEntry, CoreIndex, CoreOccupied, GroupIndex, GroupRotationInfo, Id as ParaId, + ScheduledCore, ValidatorIndex, }; -use scale_info::TypeInfo; use sp_runtime::traits::{One, Saturating}; -use sp_std::prelude::*; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + prelude::*, +}; -use crate::{configuration, initializer::SessionChangeNotification, paras}; +pub mod common; + +use common::{AssignmentProvider, AssignmentProviderConfig, CoreAssignment, FreedReason}; pub use pallet::*; #[cfg(test)] mod tests; -/// A queued parathread entry, pre-assigned to a core. -#[derive(Encode, Decode, TypeInfo)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct QueuedParathread { - claim: ParathreadEntry, - core_offset: u32, -} - -/// The queue of all parathread claims. -#[derive(Encode, Decode, TypeInfo)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct ParathreadClaimQueue { - queue: Vec, - // this value is between 0 and config.parathread_cores - next_core_offset: u32, -} - -impl ParathreadClaimQueue { - /// Queue a parathread entry to be processed. - /// - /// Provide the entry and the number of parathread cores, which must be greater than 0. - fn enqueue_entry(&mut self, entry: ParathreadEntry, n_parathread_cores: u32) { - let core_offset = self.next_core_offset; - self.next_core_offset = (self.next_core_offset + 1) % n_parathread_cores; - - self.queue.push(QueuedParathread { claim: entry, core_offset }) - } - - /// Take next queued entry with given core offset, if any. - fn take_next_on_core(&mut self, core_offset: u32) -> Option { - let pos = self.queue.iter().position(|queued| queued.core_offset == core_offset); - pos.map(|i| self.queue.remove(i).claim) - } - - /// Get the next queued entry with given core offset, if any. - fn get_next_on_core(&self, core_offset: u32) -> Option<&ParathreadEntry> { - let pos = self.queue.iter().position(|queued| queued.core_offset == core_offset); - pos.map(|i| &self.queue[i].claim) - } -} - -impl Default for ParathreadClaimQueue { - fn default() -> Self { - Self { queue: vec![], next_core_offset: 0 } - } -} - -/// Reasons a core might be freed -#[derive(Clone, Copy)] -pub enum FreedReason { - /// The core's work concluded and the parablock assigned to it is considered available. - Concluded, - /// The core's work timed out. - TimedOut, -} - -/// The assignment type. 
-#[derive(Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] -pub enum AssignmentKind { - /// A parachain. - Parachain, - /// A parathread. - Parathread(CollatorId, u32), -} - -/// How a free core is scheduled to be assigned. -#[derive(Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] -pub struct CoreAssignment { - /// The core that is assigned. - pub core: CoreIndex, - /// The unique ID of the para that is assigned to the core. - pub para_id: ParaId, - /// The kind of the assignment. - pub kind: AssignmentKind, - /// The index of the validator group assigned to the core. - pub group_idx: GroupIndex, -} - -impl CoreAssignment { - /// Get the ID of a collator who is required to collate this block. - pub fn required_collator(&self) -> Option<&CollatorId> { - match self.kind { - AssignmentKind::Parachain => None, - AssignmentKind::Parathread(ref id, _) => Some(id), - } - } - - /// Get the `CoreOccupied` from this. - pub fn to_core_occupied(&self) -> CoreOccupied { - match self.kind { - AssignmentKind::Parachain => CoreOccupied::Parachain, - AssignmentKind::Parathread(ref collator, retries) => - CoreOccupied::Parathread(ParathreadEntry { - claim: ParathreadClaim(self.para_id, collator.clone()), - retries, - }), - } - } -} +const LOG_TARGET: &str = "runtime::parachains::scheduler"; +pub mod migration; #[frame_support::pallet] pub mod pallet { use super::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + configuration::Config + paras::Config {} + pub trait Config: frame_system::Config + configuration::Config + paras::Config { + type AssignmentProvider: AssignmentProvider>; + } /// All the validator groups. One for each core. Indices are into `ActiveValidators` - not the /// broader set of Polkadot validators, but instead just the subset used for parachains during @@ -177,13 +88,6 @@ pub mod pallet { #[pallet::getter(fn validator_groups)] pub(crate) type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; - /// A queue of upcoming claims and which core they should be mapped onto. - /// - /// The number of queued claims is bounded at the `scheduling_lookahead` - /// multiplied by the number of parathread multiplexer cores. Reasonably, 10 * 50 = 500. - #[pallet::storage] - pub(crate) type ParathreadQueue = StorageValue<_, ParathreadClaimQueue, ValueQuery>; - /// One entry for each availability core. Entries are `None` if the core is not currently /// occupied. Can be temporarily `Some` if scheduled but not occupied. /// The i'th parachain belongs to the i'th core, with the remaining cores all being @@ -194,15 +98,8 @@ pub mod pallet { /// * The number of validators divided by `configuration.max_validators_per_core`. #[pallet::storage] #[pallet::getter(fn availability_cores)] - pub(crate) type AvailabilityCores = StorageValue<_, Vec>, ValueQuery>; - - /// An index used to ensure that only one claim on a parathread exists in the queue or is - /// currently being handled by an occupied core. - /// - /// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = - /// 500. - #[pallet::storage] - pub(crate) type ParathreadClaimIndex = StorageValue<_, Vec, ValueQuery>; + pub(crate) type AvailabilityCores = + StorageValue<_, Vec>>, ValueQuery>; /// The block number where the session start occurred. 
Used to track how many group rotations /// have occurred. @@ -215,18 +112,24 @@ pub mod pallet { #[pallet::getter(fn session_start_block)] pub(crate) type SessionStartBlock = StorageValue<_, BlockNumberFor, ValueQuery>; - /// Currently scheduled cores - free but up to be occupied. - /// - /// Bounded by the number of cores: one for each parachain and parathread multiplexer. - /// - /// The value contained here will not be valid after the end of a block. Runtime APIs should be - /// used to determine scheduled cores/ for the upcoming block. + /// One entry for each availability core. The `VecDeque` represents the assignments to be + /// scheduled on that core. `None` is used to signal to not schedule the next para of the core + /// as there is one currently being scheduled. Not using `None` here would overwrite the + /// `CoreState` in the runtime API. The value contained here will not be valid after the end of + /// a block. Runtime APIs should be used to determine scheduled cores/ for the upcoming block. #[pallet::storage] - #[pallet::getter(fn scheduled)] - pub(crate) type Scheduled = StorageValue<_, Vec, ValueQuery>; - // sorted ascending by CoreIndex. + #[pallet::getter(fn claimqueue)] + pub(crate) type ClaimQueue = StorageValue< + _, + BTreeMap>>>>, + ValueQuery, + >; } +type PositionInClaimqueue = u32; +type TimedoutParas = BTreeMap>>; +type ConcludedParas = BTreeMap; + impl Pallet { /// Called by the initializer to initialize the scheduler pallet. pub(crate) fn initializer_initialize(_now: BlockNumberFor) -> Weight { @@ -236,6 +139,12 @@ impl Pallet { /// Called by the initializer to finalize the scheduler pallet. pub(crate) fn initializer_finalize() {} + /// Called before the initializer notifies of a new session. + pub(crate) fn pre_new_session() { + Self::push_claimqueue_items_to_assignment_provider(); + Self::push_occupied_cores_to_assignment_provider(); + } + /// Called by the initializer to note that a new session has started. pub(crate) fn initializer_on_new_session( notification: &SessionChangeNotification>, @@ -243,10 +152,8 @@ impl Pallet { let SessionChangeNotification { validators, new_config, .. } = notification; let config = new_config; - let mut thread_queue = ParathreadQueue::::get(); - let n_parachains = >::parachains().len() as u32; let n_cores = core::cmp::max( - n_parachains + config.parathread_cores, + T::AssignmentProvider::session_core_count(), match config.max_validators_per_core { Some(x) if x != 0 => validators.len() as u32 / x, _ => 0, @@ -254,19 +161,7 @@ impl Pallet { ); AvailabilityCores::::mutate(|cores| { - // clear all occupied cores. - for maybe_occupied in cores.iter_mut() { - if let Some(CoreOccupied::Parathread(claim)) = maybe_occupied.take() { - let queued = QueuedParathread { - claim, - core_offset: 0, // this gets set later in the re-balancing. - }; - - thread_queue.queue.push(queued); - } - } - - cores.resize(n_cores as _, None); + cores.resize(n_cores as _, CoreOccupied::Free); }); // shuffle validators into groups. @@ -303,288 +198,134 @@ impl Pallet { ValidatorGroups::::set(groups); } - // prune out all parathread claims with too many retries. - // assign all non-pruned claims to new cores, if they've changed. - ParathreadClaimIndex::::mutate(|claim_index| { - // wipe all parathread metadata if no parathread cores are configured. 
- if config.parathread_cores == 0 { - thread_queue = ParathreadClaimQueue { queue: Vec::new(), next_core_offset: 0 }; - claim_index.clear(); - return - } - - // prune out all entries beyond retry or that no longer correspond to live parathread. - thread_queue.queue.retain(|queued| { - let will_keep = queued.claim.retries <= config.parathread_retries && - >::is_parathread(queued.claim.claim.0); - - if !will_keep { - let claim_para = queued.claim.claim.0; - - // clean up the pruned entry from the index. - if let Ok(i) = claim_index.binary_search(&claim_para) { - claim_index.remove(i); - } - } - - will_keep - }); - - // do re-balancing of claims. - { - for (i, queued) in thread_queue.queue.iter_mut().enumerate() { - queued.core_offset = (i as u32) % config.parathread_cores; - } - - thread_queue.next_core_offset = - ((thread_queue.queue.len()) as u32) % config.parathread_cores; - } - }); - ParathreadQueue::::set(thread_queue); - let now = >::block_number() + One::one(); >::set(now); } - /// Add a parathread claim to the queue. If there is a competing claim in the queue or currently - /// assigned to a core, this call will fail. This call will also fail if the queue is full. - /// - /// Fails if the claim does not correspond to any live parathread. - #[allow(unused)] - pub fn add_parathread_claim(claim: ParathreadClaim) { - if !>::is_parathread(claim.0) { - return - } - - let config = >::config(); - let queue_max_size = config.parathread_cores * config.scheduling_lookahead; - - ParathreadQueue::::mutate(|queue| { - if queue.queue.len() >= queue_max_size as usize { - return - } - - let para_id = claim.0; - - let competes_with_another = - ParathreadClaimIndex::::mutate(|index| match index.binary_search(¶_id) { - Ok(_) => true, - Err(i) => { - index.insert(i, para_id); - false - }, - }); - - if competes_with_another { - return - } - - let entry = ParathreadEntry { claim, retries: 0 }; - queue.enqueue_entry(entry, config.parathread_cores); - }) - } - /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along - /// with the reason for them being freed. The list is assumed to be sorted in ascending order by - /// core index. - pub(crate) fn free_cores(just_freed_cores: impl IntoIterator) { - let config = >::config(); + /// with the reason for them being freed. Returns a tuple of concluded and timedout paras. + fn free_cores( + just_freed_cores: impl IntoIterator, + ) -> (ConcludedParas, TimedoutParas) { + let mut timedout_paras: BTreeMap>> = + BTreeMap::new(); + let mut concluded_paras = BTreeMap::new(); AvailabilityCores::::mutate(|cores| { - for (freed_index, freed_reason) in just_freed_cores { - if (freed_index.0 as usize) < cores.len() { - match cores[freed_index.0 as usize].take() { - None => continue, - Some(CoreOccupied::Parachain) => {}, - Some(CoreOccupied::Parathread(entry)) => { + let c_len = cores.len(); + + just_freed_cores + .into_iter() + .filter(|(freed_index, _)| (freed_index.0 as usize) < c_len) + .for_each(|(freed_index, freed_reason)| { + match &cores[freed_index.0 as usize] { + CoreOccupied::Free => {}, + CoreOccupied::Paras(entry) => { match freed_reason { FreedReason::Concluded => { - // After a parathread candidate has successfully been included, - // open it up for further claims! 
- ParathreadClaimIndex::::mutate(|index| { - if let Ok(i) = index.binary_search(&entry.claim.0) { - index.remove(i); - } - }) + concluded_paras.insert(freed_index, entry.para_id()); }, FreedReason::TimedOut => { - // If a parathread candidate times out, it's not the collator's - // fault, so we don't increment retries. - ParathreadQueue::::mutate(|queue| { - queue.enqueue_entry(entry, config.parathread_cores); - }) + timedout_paras.insert(freed_index, entry.clone()); }, - } + }; }, - } - } - } - }) - } - - /// Schedule all unassigned cores, where possible. Provide a list of cores that should be - /// considered newly-freed along with the reason for them being freed. The list is assumed to be - /// sorted in ascending order by core index. - pub(crate) fn schedule( - just_freed_cores: impl IntoIterator, - now: BlockNumberFor, - ) { - Self::free_cores(just_freed_cores); - - let cores = AvailabilityCores::::get(); - let parachains = >::parachains(); - let mut scheduled = Scheduled::::get(); - let mut parathread_queue = ParathreadQueue::::get(); + }; - if ValidatorGroups::::get().is_empty() { - return - } + cores[freed_index.0 as usize] = CoreOccupied::Free; + }) + }); - { - let mut prev_scheduled_in_order = scheduled.iter().enumerate().peekable(); + (concluded_paras, timedout_paras) + } - // Updates to the previous list of scheduled updates and the position of where to insert - // them, without accounting for prior updates. - let mut scheduled_updates: Vec<(usize, CoreAssignment)> = Vec::new(); + /// Note that the given cores have become occupied. Update the claimqueue accordingly. + pub(crate) fn occupied( + now_occupied: BTreeMap, + ) -> BTreeMap { + let mut availability_cores = AvailabilityCores::::get(); - // single-sweep O(n) in the number of cores. - for (core_index, _core) in cores.iter().enumerate().filter(|(_, ref c)| c.is_none()) { - let schedule_and_insert_at = { - // advance the iterator until just before the core index we are looking at now. - while prev_scheduled_in_order - .peek() - .map_or(false, |(_, assign)| (assign.core.0 as usize) < core_index) - { - let _ = prev_scheduled_in_order.next(); - } + log::debug!(target: LOG_TARGET, "[occupied] now_occupied {:?}", now_occupied); + + let pos_mapping: BTreeMap = now_occupied + .iter() + .flat_map(|(core_idx, para_id)| { + match Self::remove_from_claimqueue(*core_idx, *para_id) { + Err(e) => { + log::debug!( + target: LOG_TARGET, + "[occupied] error on remove_from_claimqueue {}", + e + ); + None + }, + Ok((pos_in_claimqueue, pe)) => { + // is this correct? + availability_cores[core_idx.0 as usize] = CoreOccupied::Paras(pe); - // check the first entry already scheduled with core index >= than the one we - // are looking at. 3 cases: - // 1. No such entry, clearly this core is not scheduled, so we need to schedule - // and put at the end. 2. Entry exists and has same index as the core we are - // inspecting. do not schedule again. 3. Entry exists and has higher index than - // the core we are inspecting. schedule and note insertion position. - prev_scheduled_in_order.peek().map_or( - Some(scheduled.len()), - |(idx_in_scheduled, assign)| { - if (assign.core.0 as usize) == core_index { - None - } else { - Some(*idx_in_scheduled) - } - }, - ) - }; - - let schedule_and_insert_at = match schedule_and_insert_at { - None => continue, - Some(at) => at, - }; - - let core = CoreIndex(core_index as u32); - - let core_assignment = if core_index < parachains.len() { - // parachain core. 
- Some(CoreAssignment { - kind: AssignmentKind::Parachain, - para_id: parachains[core_index], - core, - group_idx: Self::group_assigned_to_core(core, now).expect( - "core is not out of bounds and we are guaranteed \ - to be after the most recent session start; qed", - ), - }) - } else { - // parathread core offset, rel. to beginning. - let core_offset = (core_index - parachains.len()) as u32; - - parathread_queue.take_next_on_core(core_offset).map(|entry| CoreAssignment { - kind: AssignmentKind::Parathread(entry.claim.1, entry.retries), - para_id: entry.claim.0, - core, - group_idx: Self::group_assigned_to_core(core, now).expect( - "core is not out of bounds and we are guaranteed \ - to be after the most recent session start; qed", - ), - }) - }; - - if let Some(assignment) = core_assignment { - scheduled_updates.push((schedule_and_insert_at, assignment)) + Some((*core_idx, pos_in_claimqueue)) + }, } - } + }) + .collect(); - // at this point, because `Scheduled` is guaranteed to be sorted and we navigated - // unassigned core indices in ascending order, we can enact the updates prepared by the - // previous actions. - // - // while inserting, we have to account for the amount of insertions already done. - // - // This is O(n) as well, capped at n operations, where n is the number of cores. - for (num_insertions_before, (insert_at, to_insert)) in - scheduled_updates.into_iter().enumerate() - { - let insert_at = num_insertions_before + insert_at; - scheduled.insert(insert_at, to_insert); - } + // Drop expired claims after processing now_occupied. + Self::drop_expired_claims_from_claimqueue(); - // scheduled is guaranteed to be sorted after this point because it was sorted before, - // and we applied sorted updates at their correct positions, accounting for the offsets - // of previous insertions. - } + AvailabilityCores::::set(availability_cores); - Scheduled::::set(scheduled); - ParathreadQueue::::set(parathread_queue); + pos_mapping } - /// Note that the given cores have become occupied. Behavior undefined if any of the given cores - /// were not scheduled or the slice is not sorted ascending by core index. - /// - /// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total - /// cores. This is efficient in the case that most scheduled cores are occupied. - pub(crate) fn occupied(now_occupied: &[CoreIndex]) { - if now_occupied.is_empty() { - return - } + /// Iterates through every element in all claim queues and tries to add new assignments from the + /// `AssignmentProvider`. A claim is considered expired if it's `ttl` field is lower than the + /// current block height. + fn drop_expired_claims_from_claimqueue() { + let now = >::block_number(); + let availability_cores = AvailabilityCores::::get(); - let mut availability_cores = AvailabilityCores::::get(); - Scheduled::::mutate(|scheduled| { - // The constraints on the function require that `now_occupied` is a sorted subset of the - // `scheduled` cores, which are also sorted. - - let mut occupied_iter = now_occupied.iter().cloned().peekable(); - scheduled.retain(|assignment| { - let retain = occupied_iter - .peek() - .map_or(true, |occupied_idx| occupied_idx != &assignment.core); - - if !retain { - // remove this entry - it's now occupied. and begin inspecting the next extry - // of the occupied iterator. 
- let _ = occupied_iter.next(); - - availability_cores[assignment.core.0 as usize] = - Some(assignment.to_core_occupied()); + ClaimQueue::::mutate(|cq| { + for (idx, _) in (0u32..).zip(availability_cores) { + let core_idx = CoreIndex(idx); + if let Some(core_claimqueue) = cq.get_mut(&core_idx) { + let mut dropped_claims: Vec> = vec![]; + core_claimqueue.retain(|maybe_entry| { + if let Some(entry) = maybe_entry { + if entry.ttl < now { + dropped_claims.push(Some(entry.para_id())); + return false + } + } + true + }); + + // For all claims dropped due to TTL, attempt to pop a new entry to + // the back of the claimqueue. + for drop in dropped_claims { + match T::AssignmentProvider::pop_assignment_for_core(core_idx, drop) { + Some(assignment) => { + let AssignmentProviderConfig { ttl, .. } = + T::AssignmentProvider::get_provider_config(core_idx); + core_claimqueue.push_back(Some(ParasEntry::new( + assignment.clone(), + now + ttl, + ))); + }, + None => (), + } + } } - - retain - }) + } }); - - AvailabilityCores::::set(availability_cores); } /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core /// indices out of bounds will return `None`, as will indices of unassigned cores. pub(crate) fn core_para(core_index: CoreIndex) -> Option { let cores = AvailabilityCores::::get(); - match cores.get(core_index.0 as usize).and_then(|c| c.as_ref()) { - None => None, - Some(CoreOccupied::Parachain) => { - let parachains = >::parachains(); - Some(parachains[core_index.0 as usize]) - }, - Some(CoreOccupied::Parathread(ref entry)) => Some(entry.claim.0), + match cores.get(core_index.0 as usize) { + None | Some(CoreOccupied::Free) => None, + Some(CoreOccupied::Paras(entry)) => Some(entry.para_id()), } } @@ -630,52 +371,44 @@ impl Pallet { /// Returns an optional predicate that should be used for timing out occupied cores. /// /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and - /// the block number since which it has been occupied, and the respective parachain and - /// parathread timeouts, i.e. only within `max(config.chain_availability_period, - /// config.thread_availability_period)` of the last rotation would this return `Some`, unless - /// there are no rotations. + /// the block number since which it has been occupied, and the respective parachain timeouts, + /// i.e. only within `config.paras_availability_period` of the last rotation would this return + /// `Some`, unless there are no rotations. /// - /// This really should not be a box, but is working around a compiler limitation filed here: - /// https://github.com/rust-lang/rust/issues/73226 - /// which prevents us from testing the code if using `impl Trait`. + /// The timeout used to depend, but does not depend any more on group rotations. First of all + /// it only matters if a para got another chance (a retry). If there is a retry and it happens + /// still within the same group rotation a censoring backing group would need to censor again + /// and lose out again on backing rewards. This is bad for the censoring backing group, it does + /// not matter for the parachain as long as it is retried often enough (so it eventually gets a + /// try on another backing group) - the effect is similar to having a prolonged timeout. It + /// should also be noted that for both malicious and offline backing groups it is actually more + /// realistic that the candidate will not be backed to begin with, instead of getting backed + /// and then not made available. 
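// A hedged sketch, not part of the pallet, of the predicate shape documented above:
// no timing-out is done at all once we are `paras_availability_period` or more blocks
// past the last group rotation; within that window an occupied core times out once it
// has been pending for at least the availability period reported by its assignment
// provider. The free function and its parameter names are illustrative only.
fn availability_timeout_sketch(
    blocks_since_last_rotation: u32,
    paras_availability_period: u32,
    provider_availability_period: u32,
    now: u32,
    pending_since: u32,
) -> Option<bool> {
    if blocks_since_last_rotation >= paras_availability_period {
        // Outside the post-rotation window: no pruning of occupied cores.
        None
    } else {
        // Inside the window: time out once the candidate has been pending long enough.
        Some(now.saturating_sub(pending_since) >= provider_availability_period)
    }
}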
pub(crate) fn availability_timeout_predicate( - ) -> Option) -> bool>> { + ) -> Option) -> bool> { let now = >::block_number(); let config = >::config(); - let session_start = >::get(); + let blocks_since_session_start = now.saturating_sub(session_start); let blocks_since_last_rotation = blocks_since_session_start % config.group_rotation_frequency.max(1u8.into()); - let absolute_cutoff = - sp_std::cmp::max(config.chain_availability_period, config.thread_availability_period); - - let availability_cores = AvailabilityCores::::get(); - - if blocks_since_last_rotation >= absolute_cutoff { + if blocks_since_last_rotation >= config.paras_availability_period { None } else { - Some(Box::new(move |core_index: CoreIndex, pending_since| { + Some(|core_index: CoreIndex, pending_since| { + let availability_cores = AvailabilityCores::::get(); + let AssignmentProviderConfig { availability_period, .. } = + T::AssignmentProvider::get_provider_config(core_index); + let now = >::block_number(); match availability_cores.get(core_index.0 as usize) { - None => true, // out-of-bounds, doesn't really matter what is returned. - Some(None) => true, // core not occupied, still doesn't really matter. - Some(Some(CoreOccupied::Parachain)) => { - if blocks_since_last_rotation >= config.chain_availability_period { - false // no pruning except recently after rotation. - } else { - now.saturating_sub(pending_since) >= config.chain_availability_period - } - }, - Some(Some(CoreOccupied::Parathread(_))) => { - if blocks_since_last_rotation >= config.thread_availability_period { - false // no pruning except recently after rotation. - } else { - now.saturating_sub(pending_since) >= config.thread_availability_period - } - }, + None => true, // out-of-bounds, doesn't really matter what is returned. + Some(CoreOccupied::Free) => true, // core free, still doesn't matter. + Some(CoreOccupied::Paras(_)) => + now.saturating_sub(pending_since) >= availability_period, } - })) + }) } } @@ -692,83 +425,254 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it became available. - /// - /// For parachains, this is always the ID of the parachain and no specified collator. - /// For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that - /// core, and is None if there isn't one. pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { - let parachains = >::parachains(); - if (core.0 as usize) < parachains.len() { - Some(ScheduledCore { para_id: parachains[core.0 as usize], collator: None }) - } else { - let queue = ParathreadQueue::::get(); - let core_offset = (core.0 as usize - parachains.len()) as u32; - queue.get_next_on_core(core_offset).map(|entry| ScheduledCore { - para_id: entry.claim.0, - collator: Some(entry.claim.1.clone()), - }) - } + ClaimQueue::::get().get(&core).and_then(|a| { + a.iter() + .find_map(|e| e.as_ref()) + .map(|pe| Self::paras_entry_to_scheduled_core(pe)) + }) + } + + fn paras_entry_to_scheduled_core(pe: &ParasEntry>) -> ScheduledCore { + ScheduledCore { para_id: pe.para_id(), collator: None } } /// Return the next thing that will be scheduled on this core assuming it is currently - /// occupied and the candidate occupying it became available. - /// - /// For parachains, this is always the ID of the parachain and no specified collator. 
- /// For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that - /// core, or if there isn't one, the claim that is currently occupying the core, as long - /// as the claim's retries would not exceed the limit. Otherwise None. + /// occupied and the candidate occupying it times out. pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { - let parachains = >::parachains(); - if (core.0 as usize) < parachains.len() { - Some(ScheduledCore { para_id: parachains[core.0 as usize], collator: None }) - } else { - let queue = ParathreadQueue::::get(); - - // This is the next scheduled para on this core. - let core_offset = (core.0 as usize - parachains.len()) as u32; - queue - .get_next_on_core(core_offset) - .map(|entry| ScheduledCore { - para_id: entry.claim.0, - collator: Some(entry.claim.1.clone()), - }) - .or_else(|| { - // Or, if none, the claim currently occupying the core, - // as it would be put back on the queue after timing out. - let cores = AvailabilityCores::::get(); - cores.get(core.0 as usize).and_then(|c| c.as_ref()).and_then(|o| { - match o { - CoreOccupied::Parathread(entry) => Some(ScheduledCore { - para_id: entry.claim.0, - collator: Some(entry.claim.1.clone()), - }), - CoreOccupied::Parachain => None, // defensive; not possible. - } - }) - }) + Self::next_up_on_available(core).or_else(|| { + // Or, if none, the claim currently occupying the core, + // as it would be put back on the queue after timing out if number of retries is not at + // the maximum. + let cores = AvailabilityCores::::get(); + cores.get(core.0 as usize).and_then(|c| match c { + CoreOccupied::Free => None, + CoreOccupied::Paras(pe) => { + let AssignmentProviderConfig { max_availability_timeouts, .. } = + T::AssignmentProvider::get_provider_config(core); + + if pe.availability_timeouts < max_availability_timeouts { + Some(Self::paras_entry_to_scheduled_core(pe)) + } else { + None + } + }, + }) + }) + } + + /// Pushes occupied cores to the assignment provider. + fn push_occupied_cores_to_assignment_provider() { + AvailabilityCores::::mutate(|cores| { + for (core_idx, core) in cores.iter_mut().enumerate() { + match core { + CoreOccupied::Free => continue, + CoreOccupied::Paras(entry) => { + let core_idx = CoreIndex::from(core_idx as u32); + Self::maybe_push_assignment(core_idx, entry.clone()); + }, + } + *core = CoreOccupied::Free; + } + }); + } + + // on new session + fn push_claimqueue_items_to_assignment_provider() { + for (core_idx, core_claimqueue) in ClaimQueue::::take() { + // Push back in reverse order so that when we pop from the provider again, + // the entries in the claimqueue are in the same order as they are right now. + for para_entry in core_claimqueue.into_iter().flatten().rev() { + Self::maybe_push_assignment(core_idx, para_entry); + } } } - // Free all scheduled cores and return parathread claims to queue, with retries incremented. - pub(crate) fn clear() { - let config = >::config(); - ParathreadQueue::::mutate(|queue| { - for core_assignment in Scheduled::::take() { - if let AssignmentKind::Parathread(collator, retries) = core_assignment.kind { - if !>::is_parathread(core_assignment.para_id) { + /// Push assignments back to the provider on session change unless the paras + /// timed out on availability before. 
+ fn maybe_push_assignment(core_idx: CoreIndex, pe: ParasEntry>) { + if pe.availability_timeouts == 0 { + T::AssignmentProvider::push_assignment_for_core(core_idx, pe.assignment); + } + } + + // + // ClaimQueue related functions + // + fn claimqueue_lookahead() -> u32 { + >::config().scheduling_lookahead + } + + /// Updates the claimqueue by moving it to the next paras and filling empty spots with new + /// paras. + pub(crate) fn update_claimqueue( + just_freed_cores: impl IntoIterator, + now: BlockNumberFor, + ) -> Vec>> { + Self::move_claimqueue_forward(); + Self::free_cores_and_fill_claimqueue(just_freed_cores, now) + } + + /// Moves all elements in the claimqueue forward. + fn move_claimqueue_forward() { + let mut cq = ClaimQueue::::get(); + for (_, core_queue) in cq.iter_mut() { + // First pop the finished claims from the front. + match core_queue.front() { + None => {}, + Some(None) => { + core_queue.pop_front(); + }, + Some(_) => {}, + } + } + + ClaimQueue::::set(cq); + } + + /// Frees cores and fills the free claimqueue spots by popping from the `AssignmentProvider`. + fn free_cores_and_fill_claimqueue( + just_freed_cores: impl IntoIterator, + now: BlockNumberFor, + ) -> Vec>> { + let (mut concluded_paras, mut timedout_paras) = Self::free_cores(just_freed_cores); + + // This can only happen on new sessions at which we move all assignments back to the + // provider. Hence, there's nothing we need to do here. + if ValidatorGroups::::get().is_empty() { + vec![] + } else { + let n_lookahead = Self::claimqueue_lookahead(); + let n_session_cores = T::AssignmentProvider::session_core_count(); + let cq = ClaimQueue::::get(); + let ttl = >::config().on_demand_ttl; + + for core_idx in 0..n_session_cores { + let core_idx = CoreIndex::from(core_idx); + + // add previously timedout paras back into the queue + if let Some(mut entry) = timedout_paras.remove(&core_idx) { + let AssignmentProviderConfig { max_availability_timeouts, .. } = + T::AssignmentProvider::get_provider_config(core_idx); + if entry.availability_timeouts < max_availability_timeouts { + // Increment the timeout counter. + entry.availability_timeouts += 1; + // Reset the ttl so that a timed out assignment. + entry.ttl = now + ttl; + Self::add_to_claimqueue(core_idx, entry); + // The claim has been added back into the claimqueue. + // Do not pop another assignment for the core. 
continue + } else { + // Consider timed out assignments for on demand parachains as concluded for + // the assignment provider + let ret = concluded_paras.insert(core_idx, entry.para_id()); + debug_assert!(ret.is_none()); } + } - let entry = ParathreadEntry { - claim: ParathreadClaim(core_assignment.para_id, collator), - retries: retries + 1, - }; - - if entry.retries <= config.parathread_retries { - queue.enqueue_entry(entry, config.parathread_cores); + // We consider occupied cores to be part of the claimqueue + let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32) + + if Self::is_core_occupied(core_idx) { 1 } else { 0 }; + for _ in n_lookahead_used..n_lookahead { + let concluded_para = concluded_paras.remove(&core_idx); + if let Some(assignment) = + T::AssignmentProvider::pop_assignment_for_core(core_idx, concluded_para) + { + Self::add_to_claimqueue(core_idx, ParasEntry::new(assignment, now + ttl)); } } } + + debug_assert!(timedout_paras.is_empty()); + debug_assert!(concluded_paras.is_empty()); + + Self::scheduled_claimqueue(now) + } + } + + fn is_core_occupied(core_idx: CoreIndex) -> bool { + match AvailabilityCores::::get().get(core_idx.0 as usize) { + None | Some(CoreOccupied::Free) => false, + Some(CoreOccupied::Paras(_)) => true, + } + } + + fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntry>) { + ClaimQueue::::mutate(|la| { + let la_deque = la.entry(core_idx).or_insert_with(|| VecDeque::new()); + la_deque.push_back(Some(pe)); }); } + + /// Returns `ParasEntry` with `para_id` at `core_idx` if found. + fn remove_from_claimqueue( + core_idx: CoreIndex, + para_id: ParaId, + ) -> Result<(PositionInClaimqueue, ParasEntry>), &'static str> { + ClaimQueue::::mutate(|cq| { + let core_claims = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?; + + let pos = core_claims + .iter() + .position(|a| a.as_ref().map_or(false, |pe| pe.para_id() == para_id)) + .ok_or("para id not found at core_idx lookahead")?; + + let pe = core_claims + .remove(pos) + .ok_or("remove returned None")? + .ok_or("Element in Claimqueue was None.")?; + + // Since the core is now occupied, the next entry in the claimqueue in order to achieve + // 12 second block times needs to be None + if core_claims.front() != Some(&None) { + core_claims.push_front(None); + } + Ok((pos as u32, pe)) + }) + } + + // TODO: Temporary to imitate the old schedule() call. 
Will be adjusted when we make the + // scheduler AB ready + pub(crate) fn scheduled_claimqueue( + now: BlockNumberFor, + ) -> Vec>> { + let claimqueue = ClaimQueue::::get(); + + claimqueue + .into_iter() + .flat_map(|(core_idx, v)| { + v.front() + .cloned() + .flatten() + .and_then(|pe| Self::paras_entry_to_core_assignment(now, core_idx, pe)) + }) + .collect() + } + + fn paras_entry_to_core_assignment( + now: BlockNumberFor, + core_idx: CoreIndex, + pe: ParasEntry>, + ) -> Option>> { + let group_idx = Self::group_assigned_to_core(core_idx, now)?; + Some(CoreAssignment { core: core_idx, group_idx, paras_entry: pe }) + } + + #[cfg(any(feature = "runtime-benchmarks", test))] + pub(crate) fn assignment_provider_config( + core_idx: CoreIndex, + ) -> AssignmentProviderConfig> { + T::AssignmentProvider::get_provider_config(core_idx) + } + + #[cfg(any(feature = "try-runtime", test))] + fn claimqueue_len() -> usize { + ClaimQueue::::get().iter().map(|la_vec| la_vec.1.len()).sum() + } + + #[cfg(all(not(feature = "runtime-benchmarks"), test))] + pub(crate) fn claimqueue_is_empty() -> bool { + Self::claimqueue_len() == 0 + } } diff --git a/runtime/parachains/src/scheduler/common.rs b/runtime/parachains/src/scheduler/common.rs new file mode 100644 index 000000000000..c0404a875f33 --- /dev/null +++ b/runtime/parachains/src/scheduler/common.rs @@ -0,0 +1,98 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Common traits and types used by the scheduler and assignment providers. + +use frame_support::pallet_prelude::*; +use primitives::{ + v5::{Assignment, ParasEntry}, + CoreIndex, GroupIndex, Id as ParaId, +}; +use scale_info::TypeInfo; +use sp_std::prelude::*; + +// Only used to link to configuration documentation. +#[allow(unused)] +use crate::configuration::HostConfiguration; + +/// Reasons a core might be freed +#[derive(Clone, Copy)] +pub enum FreedReason { + /// The core's work concluded and the parablock assigned to it is considered available. + Concluded, + /// The core's work timed out. + TimedOut, +} + +/// A set of variables required by the scheduler in order to operate. +pub struct AssignmentProviderConfig { + /// The availability period specified by the implementation. + /// See [`HostConfiguration::paras_availability_period`] for more information. + pub availability_period: BlockNumber, + + /// How many times a collation can time out on availability. + /// Zero timeouts still means that a collation can be provided as per the slot auction + /// assignment provider. + pub max_availability_timeouts: u32, + + /// How long the collator has to provide a collation to the backing group before being dropped. + pub ttl: BlockNumber, +} + +pub trait AssignmentProvider { + /// How many cores are allocated to this provider. + fn session_core_count() -> u32; + + /// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`]. 
+ /// The `concluded_para` field makes the caller report back to the provider + /// which [`ParaId`] it processed last on the supplied [`CoreIndex`]. + fn pop_assignment_for_core( + core_idx: CoreIndex, + concluded_para: Option, + ) -> Option; + + /// Push back an already popped assignment. Intended for provider implementations + /// that need to be able to keep track of assignments over session boundaries, + /// such as the on demand assignment provider. + fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment); + + /// Returns a set of variables needed by the scheduler + fn get_provider_config(core_idx: CoreIndex) -> AssignmentProviderConfig; +} + +/// How a core is mapped to a backing group and a `ParaId` +#[derive(Clone, Encode, Decode, PartialEq, TypeInfo)] +#[cfg_attr(feature = "std", derive(Debug))] +pub struct CoreAssignment { + /// The core that is assigned. + pub core: CoreIndex, + /// The para id and accompanying information needed to collate and back a parablock. + pub paras_entry: ParasEntry, + /// The index of the validator group assigned to the core. + pub group_idx: GroupIndex, +} + +impl CoreAssignment { + /// Returns the [`ParaId`] of the assignment. + pub fn para_id(&self) -> ParaId { + self.paras_entry.para_id() + } + + /// Returns the inner [`ParasEntry`] of the assignment. + pub fn to_paras_entry(self) -> ParasEntry { + self.paras_entry + } +} diff --git a/runtime/parachains/src/scheduler/migration.rs b/runtime/parachains/src/scheduler/migration.rs new file mode 100644 index 000000000000..4284b979264b --- /dev/null +++ b/runtime/parachains/src/scheduler/migration.rs @@ -0,0 +1,170 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. + +use super::*; +use frame_support::{ + pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, +}; +use primitives::vstaging::Assignment; + +mod v0 { + use super::*; + + use primitives::CollatorId; + #[storage_alias] + pub(super) type Scheduled = StorageValue, Vec, ValueQuery>; + + #[derive(Encode, Decode)] + pub struct QueuedParathread { + claim: primitives::ParathreadEntry, + core_offset: u32, + } + + #[derive(Encode, Decode, Default)] + pub struct ParathreadClaimQueue { + queue: Vec, + next_core_offset: u32, + } + + // Only here to facilitate the migration. + impl ParathreadClaimQueue { + pub fn len(self) -> usize { + self.queue.len() + } + } + + #[storage_alias] + pub(super) type ParathreadQueue = + StorageValue, ParathreadClaimQueue, ValueQuery>; + + #[storage_alias] + pub(super) type ParathreadClaimIndex = + StorageValue, Vec, ValueQuery>; + + /// The assignment type. + #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub enum AssignmentKind { + /// A parachain. + Parachain, + /// A parathread. 
+ Parathread(CollatorId, u32), + } + + /// How a free core is scheduled to be assigned. + #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub struct CoreAssignment { + /// The core that is assigned. + pub core: CoreIndex, + /// The unique ID of the para that is assigned to the core. + pub para_id: ParaId, + /// The kind of the assignment. + pub kind: AssignmentKind, + /// The index of the validator group assigned to the core. + pub group_idx: GroupIndex, + } +} + +pub mod v1 { + use super::*; + use crate::scheduler; + use frame_support::traits::StorageVersion; + + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + fn on_runtime_upgrade() -> Weight { + if StorageVersion::get::>() == 0 { + let weight_consumed = migrate_to_v1::(); + + log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v1"); + StorageVersion::new(1).put::>(); + + weight_consumed + } else { + log::warn!(target: scheduler::LOG_TARGET, "Para scheduler v1 migration should be removed."); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::DispatchError> { + log::trace!( + target: crate::scheduler::LOG_TARGET, + "Scheduled before migration: {}", + v0::Scheduled::::get().len() + ); + ensure!( + StorageVersion::get::>() == 0, + "Storage version should be less than `1` before the migration", + ); + + let bytes = u32::to_be_bytes(v0::Scheduled::::get().len() as u32); + + Ok(bytes.to_vec()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::DispatchError> { + log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()"); + ensure!( + StorageVersion::get::>() == 1, + "Storage version should be `1` after the migration" + ); + ensure!( + v0::Scheduled::::get().len() == 0, + "Scheduled should be empty after the migration" + ); + + let sched_len = u32::from_be_bytes(state.try_into().unwrap()); + ensure!( + Pallet::::claimqueue_len() as u32 == sched_len, + "Scheduled completely moved to ClaimQueue after migration" + ); + + Ok(()) + } + } +} + +pub fn migrate_to_v1() -> Weight { + let mut weight: Weight = Weight::zero(); + + let pq = v0::ParathreadQueue::::take(); + let pq_len = pq.len() as u64; + + let pci = v0::ParathreadClaimIndex::::take(); + let pci_len = pci.len() as u64; + + let now = >::block_number(); + let scheduled = v0::Scheduled::::take(); + let sched_len = scheduled.len() as u64; + for core_assignment in scheduled { + let core_idx = core_assignment.core; + let assignment = Assignment::new(core_assignment.para_id); + let pe = ParasEntry::new(assignment, now); + Pallet::::add_to_claimqueue(core_idx, pe); + } + + // 2x as once for Scheduled and once for Claimqueue + weight = weight.saturating_add(T::DbWeight::get().reads_writes(2 * sched_len, 2 * sched_len)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(pq_len, pq_len)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(pci_len, pci_len)); + + weight +} diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs index cc2aee357231..0f64432c5f3a 100644 --- a/runtime/parachains/src/scheduler/tests.rs +++ b/runtime/parachains/src/scheduler/tests.rs @@ -18,13 +18,15 @@ use super::*; use frame_support::assert_ok; use keyring::Sr25519Keyring; -use primitives::{BlockNumber, CollatorId, SessionIndex, ValidationCode, ValidatorId}; +use primitives::{v5::Assignment, 
BlockNumber, SessionIndex, ValidationCode, ValidatorId}; +use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ + assigner_on_demand::QueuePushDirection, configuration::HostConfiguration, initializer::SessionChangeNotification, mock::{ - new_test_ext, Configuration, MockGenesisConfig, Paras, ParasShared, RuntimeOrigin, + new_test_ext, MockGenesisConfig, OnDemandAssigner, Paras, ParasShared, RuntimeOrigin, Scheduler, System, Test, }, paras::{ParaGenesisArgs, ParaKind}, @@ -61,6 +63,8 @@ fn run_to_block( if notification_with_session_index.session_index == SessionIndex::default() { notification_with_session_index.session_index = ParasShared::scheduled_session(); } + Scheduler::pre_new_session(); + Paras::initializer_on_new_session(¬ification_with_session_index); Scheduler::initializer_on_new_session(¬ification_with_session_index); } @@ -74,8 +78,7 @@ fn run_to_block( Scheduler::initializer_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::clear(); - Scheduler::schedule(Vec::new(), b + 1); + Scheduler::update_claimqueue(BTreeMap::new(), b + 1); } } @@ -89,6 +92,8 @@ fn run_to_end_of_block( Paras::initializer_finalize(to); if let Some(notification) = new_session(to + 1) { + Scheduler::pre_new_session(); + Paras::initializer_on_new_session(¬ification); Scheduler::initializer_on_new_session(¬ification); } @@ -98,12 +103,11 @@ fn run_to_end_of_block( fn default_config() -> HostConfiguration { HostConfiguration { - parathread_cores: 3, + on_demand_cores: 3, group_rotation_frequency: 10, - chain_availability_period: 3, - thread_availability_period: 5, + paras_availability_period: 3, scheduling_lookahead: 2, - parathread_retries: 1, + on_demand_retries: 1, // This field does not affect anything that scheduler does. However, `HostConfiguration` // is still a subject to consistency test. 
It requires that // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and @@ -113,218 +117,181 @@ fn default_config() -> HostConfiguration { } } -#[test] -fn add_parathread_claim_works() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, +fn genesis_config(config: &HostConfiguration) -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { config: config.clone() }, ..Default::default() - }; - - let thread_id = ParaId::from(10); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); - - new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(thread_id, ParaKind::Parathread); - - assert!(!Paras::is_parathread(thread_id)); + } +} - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); +pub(crate) fn claimqueue_contains_only_none() -> bool { + let mut cq = Scheduler::claimqueue(); + for (_, v) in cq.iter_mut() { + v.retain(|e| e.is_some()); + } - assert!(Paras::is_parathread(thread_id)); + cq.values().map(|v| v.len()).sum::() == 0 +} - { - Scheduler::add_parathread_claim(ParathreadClaim(thread_id, collator.clone())); - let queue = ParathreadQueue::::get(); - assert_eq!(queue.next_core_offset, 1); - assert_eq!(queue.queue.len(), 1); - assert_eq!( - queue.queue[0], - QueuedParathread { - claim: ParathreadEntry { - claim: ParathreadClaim(thread_id, collator.clone()), - retries: 0, - }, - core_offset: 0, - } - ); - } +pub(crate) fn claimqueue_contains_para_ids(pids: Vec) -> bool { + let set: BTreeSet = ClaimQueue::::get() + .into_iter() + .flat_map(|(_, assignments)| { + assignments + .into_iter() + .filter_map(|assignment| assignment.and_then(|pe| Some(pe.para_id()))) + }) + .collect(); + + pids.into_iter().all(|pid| set.contains(&pid)) +} - // due to the index, completing claims are not allowed. - { - let collator2 = CollatorId::from(Sr25519Keyring::Bob.public()); - Scheduler::add_parathread_claim(ParathreadClaim(thread_id, collator2.clone())); - let queue = ParathreadQueue::::get(); - assert_eq!(queue.next_core_offset, 1); - assert_eq!(queue.queue.len(), 1); - assert_eq!( - queue.queue[0], - QueuedParathread { - claim: ParathreadEntry { - claim: ParathreadClaim(thread_id, collator.clone()), - retries: 0, - }, - core_offset: 0, - } - ); - } +pub(crate) fn availability_cores_contains_para_ids(pids: Vec) -> bool { + let set: BTreeSet = AvailabilityCores::::get() + .into_iter() + .filter_map(|core| match core { + CoreOccupied::Free => None, + CoreOccupied::Paras(entry) => Some(entry.para_id()), + }) + .collect(); - // claims on non-live parathreads have no effect. 
- { - let thread_id2 = ParaId::from(11); - Scheduler::add_parathread_claim(ParathreadClaim(thread_id2, collator.clone())); - let queue = ParathreadQueue::::get(); - assert_eq!(queue.next_core_offset, 1); - assert_eq!(queue.queue.len(), 1); - assert_eq!( - queue.queue[0], - QueuedParathread { - claim: ParathreadEntry { - claim: ParathreadClaim(thread_id, collator.clone()), - retries: 0, - }, - core_offset: 0, - } - ); - } - }) + pids.into_iter().all(|pid| set.contains(&pid)) } #[test] -fn cannot_add_claim_when_no_parathread_cores() { - let config = { - let mut config = default_config(); - config.parathread_cores = 0; - config - }; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config }, - ..Default::default() - }; +fn claimqueue_ttl_drop_fn_works() { + let mut config = default_config(); + config.scheduling_lookahead = 3; + let genesis_config = genesis_config(&config); - let thread_id = ParaId::from(10); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let para_id = ParaId::from(100); + let core_idx = CoreIndex::from(0); + let mut now = 10; new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(thread_id, ParaKind::Parathread); - - assert!(!Paras::is_parathread(thread_id)); - + assert!(default_config().on_demand_ttl == 5); + // Register and run to a blockheight where the para is in a valid state. + schedule_blank_para(para_id, ParaKind::Parathread); run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - assert!(Paras::is_parathread(thread_id)); - - Scheduler::add_parathread_claim(ParathreadClaim(thread_id, collator.clone())); - assert_eq!(ParathreadQueue::::get(), Default::default()); + // Add a claim on core 0 with a ttl in the past. + let paras_entry = ParasEntry::new(Assignment::new(para_id), now - 5); + Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + + // Claim is in queue prior to call. + assert!(claimqueue_contains_para_ids::(vec![para_id])); + + // Claim is dropped post call. + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(!claimqueue_contains_para_ids::(vec![para_id])); + + // Add a claim on core 0 with a ttl in the future (15). + let paras_entry = ParasEntry::new(Assignment::new(para_id), now + 5); + Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + + // Claim is in queue post call. + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(claimqueue_contains_para_ids::(vec![para_id])); + + now = now + 6; + run_to_block(now, |_| None); + + // Claim is dropped + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(!claimqueue_contains_para_ids::(vec![para_id])); + + // Add a claim on core 0 with a ttl == now (16) + let paras_entry = ParasEntry::new(Assignment::new(para_id), now); + Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + + // Claim is in queue post call. + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(claimqueue_contains_para_ids::(vec![para_id])); + + now = now + 1; + run_to_block(now, |_| None); + + // Drop expired claim. 
+ Scheduler::drop_expired_claims_from_claimqueue(); + + // Add a claim on core 0 with a ttl == now (17) + let paras_entry_non_expired = ParasEntry::new(Assignment::new(para_id), now); + let paras_entry_expired = ParasEntry::new(Assignment::new(para_id), now - 2); + // ttls = [17, 15, 17] + Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); + Scheduler::add_to_claimqueue(core_idx, paras_entry_expired.clone()); + Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); + let cq = Scheduler::claimqueue(); + assert!(cq.get(&core_idx).unwrap().len() == 3); + + // Add claims to on demand assignment provider. + let assignment = Assignment::new(para_id); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment, + QueuePushDirection::Back + )); + + // Drop expired claim. + Scheduler::drop_expired_claims_from_claimqueue(); + + let cq = Scheduler::claimqueue(); + let cqc = cq.get(&core_idx).unwrap(); + // Same number of claims + assert!(cqc.len() == 3); + + // The first 2 claims in the queue should have a ttl of 17, + // being the ones set up prior in this test as claims 1 and 3. + // The third claim is popped from the assignment provider and + // has a new ttl set by the scheduler of now + config.on_demand_ttl. + // ttls = [17, 17, 22] + assert!(cqc.iter().enumerate().all(|(index, entry)| { + match index { + 0 | 1 => return entry.clone().unwrap().ttl == 17, + 2 => return entry.clone().unwrap().ttl == 22, + _ => return false, + } + })) }); } +// Pretty useless here. Should be on parathread assigner... if at all #[test] -fn session_change_prunes_cores_beyond_retries_and_those_from_non_live_parathreads() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, - ..Default::default() - }; - let max_parathread_retries = default_config().parathread_retries; - - let thread_a = ParaId::from(1_u32); - let thread_b = ParaId::from(2_u32); - let thread_c = ParaId::from(3_u32); - let thread_d = ParaId::from(4_u32); +fn add_parathread_claim_works() { + let genesis_config = genesis_config(&default_config()); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let thread_id = ParaId::from(10); + let core_index = CoreIndex::from(0); + let entry_ttl = 10_000; new_test_ext(genesis_config).execute_with(|| { - assert_eq!(Configuration::config(), default_config()); - - // threads a, b, and c will be live in next session, but not d. - { - schedule_blank_para(thread_a, ParaKind::Parathread); - schedule_blank_para(thread_b, ParaKind::Parathread); - schedule_blank_para(thread_c, ParaKind::Parathread); - } - - // set up a queue as if `n_cores` was 4 and with some with many retries. - ParathreadQueue::::put({ - let mut queue = ParathreadClaimQueue::default(); - - // Will be pruned: too many retries. - queue.enqueue_entry( - ParathreadEntry { - claim: ParathreadClaim(thread_a, collator.clone()), - retries: max_parathread_retries + 1, - }, - 4, - ); - - // Will not be pruned. - queue.enqueue_entry( - ParathreadEntry { - claim: ParathreadClaim(thread_b, collator.clone()), - retries: max_parathread_retries, - }, - 4, - ); + schedule_blank_para(thread_id, ParaKind::Parathread); - // Will not be pruned. 
- queue.enqueue_entry( - ParathreadEntry { claim: ParathreadClaim(thread_c, collator.clone()), retries: 0 }, - 4, - ); + assert!(!Paras::is_parathread(thread_id)); - // Will be pruned: not a live parathread. - queue.enqueue_entry( - ParathreadEntry { claim: ParathreadClaim(thread_d, collator.clone()), retries: 0 }, - 4, - ); + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - queue - }); + assert!(Paras::is_parathread(thread_id)); - ParathreadClaimIndex::::put(vec![thread_a, thread_b, thread_c, thread_d]); + let pe = ParasEntry::new(Assignment::new(thread_id), entry_ttl); + Scheduler::add_to_claimqueue(core_index, pe.clone()); - run_to_block(10, |b| match b { - 10 => Some(SessionChangeNotification { - new_config: Configuration::config(), - ..Default::default() - }), - _ => None, - }); - assert_eq!(Configuration::config(), default_config()); - - let queue = ParathreadQueue::::get(); - assert_eq!( - queue.queue, - vec![ - QueuedParathread { - claim: ParathreadEntry { - claim: ParathreadClaim(thread_b, collator.clone()), - retries: max_parathread_retries, - }, - core_offset: 0, - }, - QueuedParathread { - claim: ParathreadEntry { - claim: ParathreadClaim(thread_c, collator.clone()), - retries: 0, - }, - core_offset: 1, - }, - ] - ); - assert_eq!(queue.next_core_offset, 2); - - assert_eq!(ParathreadClaimIndex::::get(), vec![thread_b, thread_c]); + let cq = Scheduler::claimqueue(); + assert_eq!(Scheduler::claimqueue_len(), 1); + assert_eq!(*(cq.get(&core_index).unwrap().front().unwrap()), Some(pe)); }) } #[test] fn session_change_shuffles_validators() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, - ..Default::default() - }; + let genesis_config = genesis_config(&default_config()); - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); new_test_ext(genesis_config).execute_with(|| { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); @@ -369,15 +336,12 @@ fn session_change_shuffles_validators() { fn session_change_takes_only_max_per_core() { let config = { let mut config = default_config(); - config.parathread_cores = 0; + config.on_demand_cores = 0; config.max_validators_per_core = Some(1); config }; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: config.clone() }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); new_test_ext(genesis_config).execute_with(|| { let chain_a = ParaId::from(1_u32); @@ -418,12 +382,10 @@ fn session_change_takes_only_max_per_core() { } #[test] -fn schedule_schedules() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, - ..Default::default() - }; +fn fill_claimqueue_fills() { + let genesis_config = genesis_config(&default_config()); + let lookahead = genesis_config.configuration.config.scheduling_lookahead as usize; let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); @@ -431,10 +393,12 @@ fn schedule_schedules() { let thread_b = ParaId::from(4_u32); let thread_c = ParaId::from(5_u32); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; + let assignment_c = Assignment { para_id: thread_c }; new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().parathread_cores, 3); + 
assert_eq!(default_config().on_demand_cores, 3); // register 2 parachains schedule_blank_para(chain_a, ParaKind::Parachain); @@ -462,15 +426,21 @@ fn schedule_schedules() { }); { - let scheduled = Scheduler::scheduled(); - assert_eq!(scheduled.len(), 2); + assert_eq!(Scheduler::claimqueue_len(), 2 * lookahead); + let scheduled = Scheduler::scheduled_claimqueue(1); + + // Cannot assert on indices anymore as they depend on the assignment providers + assert!(claimqueue_contains_para_ids::(vec![chain_a, chain_b])); assert_eq!( scheduled[0], CoreAssignment { core: CoreIndex(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 6 + }, group_idx: GroupIndex(0), } ); @@ -479,59 +449,98 @@ fn schedule_schedules() { scheduled[1], CoreAssignment { core: CoreIndex(1), - para_id: chain_b, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_b }, + availability_timeouts: 0, + ttl: 6 + }, group_idx: GroupIndex(1), } ); } - // add a couple of parathread claims. - Scheduler::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); - Scheduler::add_parathread_claim(ParathreadClaim(thread_c, collator.clone())); + // add a couple of parathread assignments. + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_c, + QueuePushDirection::Back + )); run_to_block(2, |_| None); + // cores 0 and 1 should be occupied. mark them as such. + Scheduler::occupied( + vec![(CoreIndex(0), chain_a), (CoreIndex(1), chain_b)].into_iter().collect(), + ); + + run_to_block(3, |_| None); { - let scheduled = Scheduler::scheduled(); - assert_eq!(scheduled.len(), 4); + assert_eq!(Scheduler::claimqueue_len(), 5); + let scheduled = Scheduler::scheduled_claimqueue(3); assert_eq!( scheduled[0], CoreAssignment { core: CoreIndex(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 6 + }, group_idx: GroupIndex(0), } ); - assert_eq!( scheduled[1], CoreAssignment { core: CoreIndex(1), - para_id: chain_b, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_b }, + availability_timeouts: 0, + ttl: 6 + }, group_idx: GroupIndex(1), } ); + // Was added a block later, note the TTL. 
assert_eq!( scheduled[2], CoreAssignment { core: CoreIndex(2), - para_id: thread_a, - kind: AssignmentKind::Parathread(collator.clone(), 0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_a }, + availability_timeouts: 0, + ttl: 7 + }, group_idx: GroupIndex(2), } ); - + // Sits on the same core as `thread_a` + assert_eq!( + Scheduler::claimqueue().get(&CoreIndex(2)).unwrap()[1], + Some(ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 7 + }) + ); assert_eq!( scheduled[3], CoreAssignment { core: CoreIndex(3), - para_id: thread_c, - kind: AssignmentKind::Parathread(collator.clone(), 0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_c }, + availability_timeouts: 0, + ttl: 7 + }, group_idx: GroupIndex(3), } ); @@ -541,10 +550,11 @@ fn schedule_schedules() { #[test] fn schedule_schedules_including_just_freed() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, - ..Default::default() - }; + let mut config = default_config(); + // NOTE: This test expects on demand cores to each get slotted on to a different core + // and not fill up the claimqueue of each core first. + config.scheduling_lookahead = 1; + let genesis_config = genesis_config(&config); let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); @@ -555,10 +565,14 @@ fn schedule_schedules_including_just_freed() { let thread_d = ParaId::from(6_u32); let thread_e = ParaId::from(7_u32); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; + let assignment_c = Assignment { para_id: thread_c }; + let assignment_d = Assignment { para_id: thread_d }; + let assignment_e = Assignment { para_id: thread_e }; new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); // register 2 parachains schedule_blank_para(chain_a, ParaKind::Parachain); @@ -588,39 +602,68 @@ fn schedule_schedules_including_just_freed() { }); // add a couple of parathread claims now that the parathreads are live. - Scheduler::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); - Scheduler::add_parathread_claim(ParathreadClaim(thread_c, collator.clone())); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_c, + QueuePushDirection::Back + )); - run_to_block(2, |_| None); + let mut now = 2; + run_to_block(now, |_| None); - assert_eq!(Scheduler::scheduled().len(), 4); + assert_eq!(Scheduler::scheduled_claimqueue(now).len(), 4); // cores 0, 1, 2, and 3 should be occupied. mark them as such. 
- Scheduler::occupied(&[CoreIndex(0), CoreIndex(1), CoreIndex(2), CoreIndex(3)]); + let mut occupied_map: BTreeMap = BTreeMap::new(); + occupied_map.insert(CoreIndex(0), chain_a); + occupied_map.insert(CoreIndex(1), chain_b); + occupied_map.insert(CoreIndex(2), thread_a); + occupied_map.insert(CoreIndex(3), thread_c); + Scheduler::occupied(occupied_map); { let cores = AvailabilityCores::::get(); - assert!(cores[0].is_some()); - assert!(cores[1].is_some()); - assert!(cores[2].is_some()); - assert!(cores[3].is_some()); - assert!(cores[4].is_none()); + // cores 0, 1, 2, and 3 are all `CoreOccupied::Paras(ParasEntry...)` + assert!(cores[0] != CoreOccupied::Free); + assert!(cores[1] != CoreOccupied::Free); + assert!(cores[2] != CoreOccupied::Free); + assert!(cores[3] != CoreOccupied::Free); + + // core 4 is free + assert!(cores[4] == CoreOccupied::Free); - assert!(Scheduler::scheduled().is_empty()); + assert!(Scheduler::scheduled_claimqueue(now).is_empty()); + + // All core index entries in the claimqueue should have `None` in them. + Scheduler::claimqueue().iter().for_each(|(_core_idx, core_queue)| { + assert!(core_queue.iter().all(|claim| claim.is_none())) + }) } // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core // (4) and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` // then will go for core `3`. - Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); - Scheduler::add_parathread_claim(ParathreadClaim(thread_d, collator.clone())); - Scheduler::add_parathread_claim(ParathreadClaim(thread_e, collator.clone())); - - run_to_block(3, |_| None); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_d, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_e.clone(), + QueuePushDirection::Back + )); + now = 3; + run_to_block(now, |_| None); { - let scheduled = Scheduler::scheduled(); + let scheduled = Scheduler::scheduled_claimqueue(now); // cores 0 and 1 are occupied by parachains. cores 2 and 3 are occupied by parathread // claims. core 4 was free. @@ -629,25 +672,28 @@ fn schedule_schedules_including_just_freed() { scheduled[0], CoreAssignment { core: CoreIndex(4), - para_id: thread_b, - kind: AssignmentKind::Parathread(collator.clone(), 0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 8 + }, group_idx: GroupIndex(4), } ); } // now note that cores 0, 2, and 3 were freed. - Scheduler::schedule( - vec![ - (CoreIndex(0), FreedReason::Concluded), - (CoreIndex(2), FreedReason::Concluded), - (CoreIndex(3), FreedReason::TimedOut), // should go back on queue. - ], - 3, - ); + let just_updated: BTreeMap = vec![ + (CoreIndex(0), FreedReason::Concluded), + (CoreIndex(2), FreedReason::Concluded), + (CoreIndex(3), FreedReason::TimedOut), // should go back on queue. + ] + .into_iter() + .collect(); + Scheduler::update_claimqueue(just_updated, now); { - let scheduled = Scheduler::scheduled(); + let scheduled = Scheduler::scheduled_claimqueue(now); // 1 thing scheduled before, + 3 cores freed. 
assert_eq!(scheduled.len(), 4); @@ -655,8 +701,11 @@ fn schedule_schedules_including_just_freed() { scheduled[0], CoreAssignment { core: CoreIndex(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 8 + }, group_idx: GroupIndex(0), } ); @@ -664,17 +713,24 @@ fn schedule_schedules_including_just_freed() { scheduled[1], CoreAssignment { core: CoreIndex(2), - para_id: thread_d, - kind: AssignmentKind::Parathread(collator.clone(), 0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_d }, + availability_timeouts: 0, + ttl: 8 + }, group_idx: GroupIndex(2), } ); + // Although C was descheduled, the core `4` was occupied so C goes back to the queue. assert_eq!( scheduled[2], CoreAssignment { core: CoreIndex(3), - para_id: thread_e, - kind: AssignmentKind::Parathread(collator.clone(), 0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_c }, + availability_timeouts: 1, + ttl: 8 + }, group_idx: GroupIndex(3), } ); @@ -682,49 +738,44 @@ fn schedule_schedules_including_just_freed() { scheduled[3], CoreAssignment { core: CoreIndex(4), - para_id: thread_b, - kind: AssignmentKind::Parathread(collator.clone(), 0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 8 + }, group_idx: GroupIndex(4), } ); - // the prior claim on thread A concluded, but the claim on thread C was marked as - // timed out. - let index = ParathreadClaimIndex::::get(); - let parathread_queue = ParathreadQueue::::get(); - - // thread A claim should have been wiped, but thread C claim should remain. - assert_eq!(index, vec![thread_b, thread_c, thread_d, thread_e]); - - // Although C was descheduled, the core `4` was occupied so C goes back on the queue. - assert_eq!(parathread_queue.queue.len(), 1); - assert_eq!( - parathread_queue.queue[0], - QueuedParathread { - claim: ParathreadEntry { - claim: ParathreadClaim(thread_c, collator.clone()), - retries: 0, // retries not incremented by timeout - validators' fault. - }, - core_offset: 2, // reassigned to next core. thread_e claim was on offset 1. - } - ); + // The only assignment yet to be popped on to the claim queue is `thread_e`. + // This is due to `thread_c` timing out. + let order_queue = OnDemandAssigner::get_queue(); + assert!(order_queue.len() == 1); + assert!(order_queue[0] == assignment_e); + + // Chain B's core was not marked concluded or timed out, it should be on an + // availability core + assert!(availability_cores_contains_para_ids::(vec![chain_b])); + // Thread A claim should have been wiped, but thread C claim should remain. 
+ assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + assert!(claimqueue_contains_para_ids::(vec![thread_c])); + assert!(!availability_cores_contains_para_ids::(vec![thread_a, thread_c])); } }); } #[test] fn schedule_clears_availability_cores() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, - ..Default::default() - }; + let mut config = default_config(); + config.scheduling_lookahead = 1; + let genesis_config = genesis_config(&config); let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); let chain_c = ParaId::from(3_u32); new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); // register 3 parachains schedule_blank_para(chain_a, ParaKind::Parachain); @@ -749,56 +800,55 @@ fn schedule_clears_availability_cores() { run_to_block(2, |_| None); - assert_eq!(Scheduler::scheduled().len(), 3); + assert_eq!(Scheduler::claimqueue().len(), 3); // cores 0, 1, and 2 should be occupied. mark them as such. - Scheduler::occupied(&[CoreIndex(0), CoreIndex(1), CoreIndex(2)]); + Scheduler::occupied( + vec![(CoreIndex(0), chain_a), (CoreIndex(1), chain_b), (CoreIndex(2), chain_c)] + .into_iter() + .collect(), + ); { let cores = AvailabilityCores::::get(); - assert!(cores[0].is_some()); - assert!(cores[1].is_some()); - assert!(cores[2].is_some()); + assert_eq!(cores[0].is_free(), false); + assert_eq!(cores[1].is_free(), false); + assert_eq!(cores[2].is_free(), false); - assert!(Scheduler::scheduled().is_empty()); + assert!(claimqueue_contains_only_none()); } run_to_block(3, |_| None); // now note that cores 0 and 2 were freed. - Scheduler::schedule( - vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)], + Scheduler::free_cores_and_fill_claimqueue( + vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)] + .into_iter() + .collect::>(), 3, ); { - let scheduled = Scheduler::scheduled(); - - assert_eq!(scheduled.len(), 2); + let claimqueue = Scheduler::claimqueue(); + let claimqueue_0 = claimqueue.get(&CoreIndex(0)).unwrap().clone(); + let claimqueue_2 = claimqueue.get(&CoreIndex(2)).unwrap().clone(); + let entry_ttl = 8; + assert_eq!(claimqueue_0.len(), 1); + assert_eq!(claimqueue_2.len(), 1); assert_eq!( - scheduled[0], - CoreAssignment { - core: CoreIndex(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), - } + claimqueue_0, + vec![Some(ParasEntry::new(Assignment::new(chain_a), entry_ttl))], ); assert_eq!( - scheduled[1], - CoreAssignment { - core: CoreIndex(2), - para_id: chain_c, - kind: AssignmentKind::Parachain, - group_idx: GroupIndex(2), - } + claimqueue_2, + vec![Some(ParasEntry::new(Assignment::new(chain_c), entry_ttl))], ); - // The freed cores should be `None` in `AvailabilityCores`. + // The freed cores should be `Free` in `AvailabilityCores`. 
let cores = AvailabilityCores::::get(); - assert!(cores[0].is_none()); - assert!(cores[2].is_none()); + assert!(cores[0].is_free()); + assert!(cores[2].is_free()); } }); } @@ -808,27 +858,26 @@ fn schedule_rotates_groups() { let config = { let mut config = default_config(); - // make sure parathread requests don't retry-out - config.parathread_retries = config.group_rotation_frequency * 3; - config.parathread_cores = 2; + // make sure on demand requests don't retry-out + config.on_demand_retries = config.group_rotation_frequency * 3; + config.on_demand_cores = 2; + config.scheduling_lookahead = 1; config }; let rotation_frequency = config.group_rotation_frequency; - let parathread_cores = config.parathread_cores; + let on_demand_cores = config.on_demand_cores; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: config.clone() }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); let thread_a = ParaId::from(1_u32); let thread_b = ParaId::from(2_u32); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); schedule_blank_para(thread_a, ParaKind::Parathread); schedule_blank_para(thread_b, ParaKind::Parathread); @@ -846,64 +895,72 @@ fn schedule_rotates_groups() { _ => None, }); - let session_start_block = SessionStartBlock::::get(); + let session_start_block = Scheduler::session_start_block(); assert_eq!(session_start_block, 1); - Scheduler::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); - Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b, + QueuePushDirection::Back + )); - run_to_block(2, |_| None); + let mut now = 2; + run_to_block(now, |_| None); - let assert_groups_rotated = |rotations: u32| { - let scheduled = Scheduler::scheduled(); + let assert_groups_rotated = |rotations: u32, now: &BlockNumberFor| { + let scheduled = Scheduler::scheduled_claimqueue(*now); assert_eq!(scheduled.len(), 2); - assert_eq!(scheduled[0].group_idx, GroupIndex((0u32 + rotations) % parathread_cores)); - assert_eq!(scheduled[1].group_idx, GroupIndex((1u32 + rotations) % parathread_cores)); + assert_eq!(scheduled[0].group_idx, GroupIndex((0u32 + rotations) % on_demand_cores)); + assert_eq!(scheduled[1].group_idx, GroupIndex((1u32 + rotations) % on_demand_cores)); }; - assert_groups_rotated(0); + assert_groups_rotated(0, &now); // one block before first rotation. + now = rotation_frequency; run_to_block(rotation_frequency, |_| None); - assert_groups_rotated(0); + assert_groups_rotated(0, &now); // first rotation. - run_to_block(rotation_frequency + 1, |_| None); - assert_groups_rotated(1); + now = now + 1; + run_to_block(now, |_| None); + assert_groups_rotated(1, &now); // one block before second rotation. - run_to_block(rotation_frequency * 2, |_| None); - assert_groups_rotated(1); + now = rotation_frequency * 2; + run_to_block(now, |_| None); + assert_groups_rotated(1, &now); // second rotation. 
-		run_to_block(rotation_frequency * 2 + 1, |_| None);
-		assert_groups_rotated(2);
+		now = now + 1;
+		run_to_block(now, |_| None);
+		assert_groups_rotated(2, &now);
 	});
 }
 
 #[test]
-fn parathread_claims_are_pruned_after_retries() {
-	let max_retries = default_config().parathread_retries;
-
-	let genesis_config = MockGenesisConfig {
-		configuration: crate::configuration::GenesisConfig { config: default_config() },
-		..Default::default()
-	};
+fn on_demand_claims_are_pruned_after_timing_out() {
+	let max_retries = 20;
+	let mut config = default_config();
+	config.scheduling_lookahead = 1;
+	config.on_demand_cores = 2;
+	config.on_demand_retries = max_retries;
+	let genesis_config = genesis_config(&config);
 
 	let thread_a = ParaId::from(1_u32);
-	let thread_b = ParaId::from(2_u32);
 
-	let collator = CollatorId::from(Sr25519Keyring::Alice.public());
+	let assignment_a = Assignment { para_id: thread_a };
 
 	new_test_ext(genesis_config).execute_with(|| {
-		assert_eq!(default_config().parathread_cores, 3);
-
 		schedule_blank_para(thread_a, ParaKind::Parathread);
-		schedule_blank_para(thread_b, ParaKind::Parathread);
 
-		// start a new session to activate, 5 validators for 5 cores.
-		run_to_block(1, |number| match number {
+		// #1
+		let mut now = 1;
+		run_to_block(now, |number| match number {
 			1 => Some(SessionChangeNotification {
 				new_config: default_config(),
 				validators: vec![
@@ -915,39 +972,126 @@ fn parathread_claims_are_pruned_after_retries() {
 			_ => None,
 		});
 
-		Scheduler::add_parathread_claim(ParathreadClaim(thread_a, collator.clone()));
-		Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone()));
+		assert_ok!(OnDemandAssigner::add_on_demand_assignment(
+			assignment_a.clone(),
+			QueuePushDirection::Back
+		));
+
+		// #2
+		now += 1;
+		run_to_block(now, |_| None);
+		assert_eq!(Scheduler::claimqueue().len(), 1);
+		// ParaId a is in the claimqueue.
+		assert!(claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+
+		Scheduler::occupied(vec![(CoreIndex(0), thread_a)].into_iter().collect());
+		// ParaId a is no longer in the claimqueue.
+		assert!(!claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+		// It is in availability cores.
+		assert!(availability_cores_contains_para_ids::<Test>(vec![thread_a]));
+
+		// #3
+		now += 1;
+		// Run to block #n over the max_retries value.
+		// In this case, both validator groups will time out on availability and
+		// the assignment will be dropped.
+		for n in now..=(now + max_retries + 1) {
+			// #n
+			run_to_block(n, |_| None);
+			// Time out on core 0.
+			let just_updated: BTreeMap<CoreIndex, FreedReason> = vec![
+				(CoreIndex(0), FreedReason::TimedOut), // should go back on queue.
+			]
+			.into_iter()
+			.collect();
+			let core_assignments = Scheduler::update_claimqueue(just_updated, now);
+
+			// ParaId a exists in the claim queue until max_retries is reached.
+			if n < max_retries + now {
+				assert!(claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+			} else {
+				assert!(!claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+			}
-		run_to_block(2, |_| None);
-		assert_eq!(Scheduler::scheduled().len(), 2);
+			// Occupy the cores based on the output of update_claimqueue.
+			Scheduler::occupied(
+				core_assignments
+					.iter()
+					.map(|core_assignment| (core_assignment.core, core_assignment.para_id()))
+					.collect(),
+			);
+		}
-		run_to_block(2 + max_retries, |_| None);
-		assert_eq!(Scheduler::scheduled().len(), 2);
+		// ParaId a does not exist in the claimqueue/availability_cores after
+		// threshold has been reached.
+		assert!(!claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+		assert!(!availability_cores_contains_para_ids::<Test>(vec![thread_a]));
+
+		// #25
+		now += max_retries + 2;
+
+		// Add assignment back to the mix.
+		assert_ok!(OnDemandAssigner::add_on_demand_assignment(
+			assignment_a.clone(),
+			QueuePushDirection::Back
+		));
+		run_to_block(now, |_| None);
+
+		assert!(claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+
+		// #26
+		now += 1;
+		// Run to block #n but this time have group 1 conclude the availability.
+		for n in now..=(now + max_retries + 1) {
+			// #n
+			run_to_block(n, |_| None);
+			// Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude.
+			let mut just_updated: BTreeMap<CoreIndex, FreedReason> = BTreeMap::new();
+			if let Some(group) = Scheduler::group_assigned_to_core(CoreIndex(0), n) {
+				match group {
+					GroupIndex(0) => {
+						just_updated.insert(CoreIndex(0), FreedReason::TimedOut); // should go back on queue.
+					},
+					GroupIndex(1) => {
+						just_updated.insert(CoreIndex(0), FreedReason::Concluded);
+					},
+					_ => panic!("Should only have 2 groups here"),
+				}
+			}
+
+			let core_assignments = Scheduler::update_claimqueue(just_updated, now);
+
+			// ParaId a exists in the claim queue until groups are rotated.
+			if n < 31 {
+				assert!(claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+			} else {
+				assert!(!claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+			}
+
+			// Occupy the cores based on the output of update_claimqueue.
+			Scheduler::occupied(
+				core_assignments
+					.iter()
+					.map(|core_assignment| (core_assignment.core, core_assignment.para_id()))
+					.collect(),
+			);
+		}
-		run_to_block(2 + max_retries + 1, |_| None);
-		assert_eq!(Scheduler::scheduled().len(), 0);
+		// ParaId a does not exist in the claimqueue/availability_cores after
+		// being concluded.
+		assert!(!claimqueue_contains_para_ids::<Test>(vec![thread_a]));
+		assert!(!availability_cores_contains_para_ids::<Test>(vec![thread_a]));
 	});
 }
 
 #[test]
 fn availability_predicate_works() {
-	let genesis_config = MockGenesisConfig {
-		configuration: crate::configuration::GenesisConfig { config: default_config() },
-		..Default::default()
-	};
+	let genesis_config = genesis_config(&default_config());
 
-	let HostConfiguration {
-		group_rotation_frequency,
-		chain_availability_period,
-		thread_availability_period,
-		..
-	} = default_config();
-	let collator = CollatorId::from(Sr25519Keyring::Alice.public());
+	let HostConfiguration { group_rotation_frequency, paras_availability_period, .. } =
+		default_config();
 
-	assert!(
-		chain_availability_period < thread_availability_period &&
-			thread_availability_period < group_rotation_frequency
-	);
+	assert!(paras_availability_period < group_rotation_frequency);
 
 	let chain_a = ParaId::from(1_u32);
 	let thread_a = ParaId::from(2_u32);
@@ -956,7 +1100,7 @@ fn availability_predicate_works() {
 		schedule_blank_para(chain_a, ParaKind::Parachain);
 		schedule_blank_para(thread_a, ParaKind::Parathread);
 
-		// start a new session with our chain & thread registered.
+		// start a new session with our chain registered.
 		run_to_block(1, |number| match number {
 			1 => Some(SessionChangeNotification {
 				new_config: default_config(),
@@ -974,16 +1118,17 @@ fn availability_predicate_works() {
 
 		// assign some availability cores.
{ + let entry_ttl = 10_000; AvailabilityCores::::mutate(|cores| { - cores[0] = Some(CoreOccupied::Parachain); - cores[1] = Some(CoreOccupied::Parathread(ParathreadEntry { - claim: ParathreadClaim(thread_a, collator), - retries: 0, - })) + cores[0] = + CoreOccupied::Paras(ParasEntry::new(Assignment::new(chain_a), entry_ttl)); + cores[1] = + CoreOccupied::Paras(ParasEntry::new(Assignment::new(thread_a), entry_ttl)); }); } - run_to_block(1 + thread_availability_period, |_| None); + run_to_block(1 + paras_availability_period, |_| None); + assert!(Scheduler::availability_timeout_predicate().is_none()); run_to_block(1 + group_rotation_frequency, |_| None); @@ -993,59 +1138,40 @@ fn availability_predicate_works() { .expect("predicate exists recently after rotation"); let now = System::block_number(); - let would_be_timed_out = now - thread_availability_period; + let would_be_timed_out = now - paras_availability_period; for i in 0..AvailabilityCores::::get().len() { // returns true for unoccupied cores. - // And can time out both threads and chains at this stage. + // And can time out paras at this stage. assert!(pred(CoreIndex(i as u32), would_be_timed_out)); } - assert!(!pred(CoreIndex(0), now)); // assigned: chain - assert!(!pred(CoreIndex(1), now)); // assigned: thread + assert!(!pred(CoreIndex(0), now)); + assert!(!pred(CoreIndex(1), now)); assert!(pred(CoreIndex(2), now)); - // check the tighter bound on chains vs threads. - assert!(pred(CoreIndex(0), now - chain_availability_period)); - assert!(!pred(CoreIndex(1), now - chain_availability_period)); + // check the tight bound. + assert!(pred(CoreIndex(0), now - paras_availability_period)); + assert!(pred(CoreIndex(1), now - paras_availability_period)); // check the threshold is exact. - assert!(!pred(CoreIndex(0), now - chain_availability_period + 1)); - assert!(!pred(CoreIndex(1), now - thread_availability_period + 1)); + assert!(!pred(CoreIndex(0), now - paras_availability_period + 1)); + assert!(!pred(CoreIndex(1), now - paras_availability_period + 1)); } - run_to_block(1 + group_rotation_frequency + chain_availability_period, |_| None); - - { - let pred = Scheduler::availability_timeout_predicate() - .expect("predicate exists recently after rotation"); - - let would_be_timed_out = System::block_number() - thread_availability_period; - - assert!(!pred(CoreIndex(0), would_be_timed_out)); // chains can't be timed out now. - assert!(pred(CoreIndex(1), would_be_timed_out)); // but threads can. 
- } - - run_to_block(1 + group_rotation_frequency + thread_availability_period, |_| None); - - assert!(Scheduler::availability_timeout_predicate().is_none()); + run_to_block(1 + group_rotation_frequency + paras_availability_period, |_| None); }); } #[test] fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { let mut config = default_config(); - config.parathread_cores = 1; + config.on_demand_cores = 1; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: config.clone() }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); let thread_a = ParaId::from(1_u32); let thread_b = ParaId::from(2_u32); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); - new_test_ext(genesis_config).execute_with(|| { schedule_blank_para(thread_a, ParaKind::Parathread); schedule_blank_para(thread_b, ParaKind::Parathread); @@ -1063,38 +1189,42 @@ fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { _ => None, }); - let thread_claim_a = ParathreadClaim(thread_a, collator.clone()); - let thread_claim_b = ParathreadClaim(thread_b, collator.clone()); + let thread_entry_a = ParasEntry { + assignment: Assignment { para_id: thread_a }, + availability_timeouts: 0, + ttl: 5, + }; + let thread_entry_b = ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 5, + }; - Scheduler::add_parathread_claim(thread_claim_a.clone()); + Scheduler::add_to_claimqueue(CoreIndex(0), thread_entry_a.clone()); run_to_block(2, |_| None); { - assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!(Scheduler::claimqueue_len(), 1); assert_eq!(Scheduler::availability_cores().len(), 1); - Scheduler::occupied(&[CoreIndex(0)]); + let mut map = BTreeMap::new(); + map.insert(CoreIndex(0), thread_a); + Scheduler::occupied(map); let cores = Scheduler::availability_cores(); - match cores[0].as_ref().unwrap() { - CoreOccupied::Parathread(entry) => assert_eq!(entry.claim, thread_claim_a), + match &cores[0] { + CoreOccupied::Paras(entry) => assert_eq!(entry, &thread_entry_a), _ => panic!("with no chains, only core should be a thread core"), } assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); - Scheduler::add_parathread_claim(thread_claim_b); - - let queue = ParathreadQueue::::get(); - assert_eq!( - queue.get_next_on_core(0).unwrap().claim, - ParathreadClaim(thread_b, collator.clone()), - ); + Scheduler::add_to_claimqueue(CoreIndex(0), thread_entry_b); assert_eq!( Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: thread_b, collator: Some(collator.clone()) } + ScheduledCore { para_id: thread_b, collator: None } ); } }); @@ -1103,17 +1233,15 @@ fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { #[test] fn next_up_on_time_out_reuses_claim_if_nothing_queued() { let mut config = default_config(); - config.parathread_cores = 1; + config.on_demand_cores = 1; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: config.clone() }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); let thread_a = ParaId::from(1_u32); let thread_b = ParaId::from(2_u32); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; new_test_ext(genesis_config).execute_with(|| { schedule_blank_para(thread_a, ParaKind::Parathread); @@ -1132,44 +1260,49 @@ fn 
next_up_on_time_out_reuses_claim_if_nothing_queued() { _ => None, }); - let thread_claim_a = ParathreadClaim(thread_a, collator.clone()); - let thread_claim_b = ParathreadClaim(thread_b, collator.clone()); - - Scheduler::add_parathread_claim(thread_claim_a.clone()); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Back + )); run_to_block(2, |_| None); { - assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!(Scheduler::claimqueue().len(), 1); assert_eq!(Scheduler::availability_cores().len(), 1); - Scheduler::occupied(&[CoreIndex(0)]); + let mut map = BTreeMap::new(); + map.insert(CoreIndex(0), thread_a); + Scheduler::occupied(map); let cores = Scheduler::availability_cores(); - match cores[0].as_ref().unwrap() { - CoreOccupied::Parathread(entry) => assert_eq!(entry.claim, thread_claim_a), + match cores.get(0).unwrap() { + CoreOccupied::Paras(entry) => assert_eq!(entry.assignment, assignment_a.clone()), _ => panic!("with no chains, only core should be a thread core"), } - let queue = ParathreadQueue::::get(); - assert!(queue.get_next_on_core(0).is_none()); + // There's nothing more to pop for core 0 from the assignment provider. + assert!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(thread_a)).is_none() + ); + assert_eq!( Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: thread_a, collator: Some(collator.clone()) } + ScheduledCore { para_id: thread_a, collator: None } ); - Scheduler::add_parathread_claim(thread_claim_b); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b.clone(), + QueuePushDirection::Back + )); - let queue = ParathreadQueue::::get(); - assert_eq!( - queue.get_next_on_core(0).unwrap().claim, - ParathreadClaim(thread_b, collator.clone()), - ); + // Pop assignment_b into the claimqueue + Scheduler::update_claimqueue(BTreeMap::new(), 2); - // Now that there is an earlier next-up, we use that. + //// Now that there is an earlier next-up, we use that. 
assert_eq!( Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: thread_b, collator: Some(collator.clone()) } + ScheduledCore { para_id: thread_b, collator: None } ); } }); @@ -1178,13 +1311,8 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { #[test] fn next_up_on_available_is_parachain_always() { let mut config = default_config(); - config.parathread_cores = 0; - - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: config.clone() }, - ..Default::default() - }; - + config.on_demand_cores = 0; + let genesis_config = genesis_config(&config); let chain_a = ParaId::from(1_u32); new_test_ext(genesis_config).execute_with(|| { @@ -1206,14 +1334,14 @@ fn next_up_on_available_is_parachain_always() { run_to_block(2, |_| None); { - assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!(Scheduler::claimqueue().len(), 1); assert_eq!(Scheduler::availability_cores().len(), 1); - Scheduler::occupied(&[CoreIndex(0)]); + Scheduler::occupied(vec![(CoreIndex(0), chain_a)].into_iter().collect()); let cores = Scheduler::availability_cores(); - match cores[0].as_ref().unwrap() { - CoreOccupied::Parachain => {}, + match &cores[0] { + CoreOccupied::Paras(pe) if pe.para_id() == chain_a => {}, _ => panic!("with no threads, only core should be a chain core"), } @@ -1229,12 +1357,9 @@ fn next_up_on_available_is_parachain_always() { #[test] fn next_up_on_time_out_is_parachain_always() { let mut config = default_config(); - config.parathread_cores = 0; + config.on_demand_cores = 0; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: config.clone() }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); let chain_a = ParaId::from(1_u32); @@ -1257,15 +1382,15 @@ fn next_up_on_time_out_is_parachain_always() { run_to_block(2, |_| None); { - assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!(Scheduler::claimqueue().len(), 1); assert_eq!(Scheduler::availability_cores().len(), 1); - Scheduler::occupied(&[CoreIndex(0)]); + Scheduler::occupied(vec![(CoreIndex(0), chain_a)].into_iter().collect()); let cores = Scheduler::availability_cores(); - match cores[0].as_ref().unwrap() { - CoreOccupied::Parachain => {}, - _ => panic!("with no threads, only core should be a chain core"), + match &cores[0] { + CoreOccupied::Paras(pe) if pe.para_id() == chain_a => {}, + _ => panic!("Core should be occupied by chain_a ParaId"), } // Now that there is an earlier next-up, we use that. 
@@ -1279,12 +1404,11 @@ fn next_up_on_time_out_is_parachain_always() { #[test] fn session_change_requires_reschedule_dropping_removed_paras() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, - ..Default::default() - }; + let mut config = default_config(); + config.scheduling_lookahead = 1; + let genesis_config = genesis_config(&config); - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); new_test_ext(genesis_config).execute_with(|| { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); @@ -1311,13 +1435,12 @@ fn session_change_requires_reschedule_dropping_removed_paras() { _ => None, }); - assert_eq!(Scheduler::scheduled().len(), 2); + assert_eq!(Scheduler::claimqueue().len(), 2); let groups = ValidatorGroups::::get(); assert_eq!(groups.len(), 5); assert_ok!(Paras::schedule_para_cleanup(chain_b)); - run_to_end_of_block(2, |number| match number { 2 => Some(SessionChangeNotification { new_config: default_config(), @@ -1336,73 +1459,78 @@ fn session_change_requires_reschedule_dropping_removed_paras() { _ => None, }); - Scheduler::clear(); - Scheduler::schedule(Vec::new(), 3); + Scheduler::update_claimqueue(BTreeMap::new(), 3); assert_eq!( - Scheduler::scheduled(), - vec![CoreAssignment { - core: CoreIndex(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), - }], + Scheduler::claimqueue(), + vec![( + CoreIndex(0), + vec![Some(ParasEntry::new( + Assignment::new(chain_a), + // At end of block 2 + config.on_demand_ttl + 2 + ))] + .into_iter() + .collect() + )] + .into_iter() + .collect() ); - }); -} - -#[test] -fn parathread_claims_are_pruned_after_deregistration() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config: default_config() }, - ..Default::default() - }; - - let thread_a = ParaId::from(1_u32); - let thread_b = ParaId::from(2_u32); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); - - new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().parathread_cores, 3); - - schedule_blank_para(thread_a, ParaKind::Parathread); - schedule_blank_para(thread_b, ParaKind::Parathread); + // Add parachain back + schedule_blank_para(chain_b, ParaKind::Parachain); - // start a new session to activate, 5 validators for 5 cores. 
- run_to_block(1, |number| match number { - 1 => Some(SessionChangeNotification { + run_to_block(3, |number| match number { + 3 => Some(SessionChangeNotification { new_config: default_config(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), ValidatorId::from(Sr25519Keyring::Eve.public()), + ValidatorId::from(Sr25519Keyring::Ferdie.public()), + ValidatorId::from(Sr25519Keyring::One.public()), ], + random_seed: [99; 32], ..Default::default() }), _ => None, }); - Scheduler::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); - Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); + assert_eq!(Scheduler::claimqueue().len(), 2); - run_to_block(2, |_| None); - assert_eq!(Scheduler::scheduled().len(), 2); + let groups = ValidatorGroups::::get(); + assert_eq!(groups.len(), 5); - assert_ok!(Paras::schedule_para_cleanup(thread_a)); + Scheduler::update_claimqueue(BTreeMap::new(), 4); - // start a new session to activate, 5 validators for 5 cores. - run_to_block(3, |number| match number { - 3 => Some(SessionChangeNotification { - new_config: default_config(), - validators: vec![ - ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ], - ..Default::default() - }), - _ => None, - }); - - assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!( + Scheduler::claimqueue(), + vec![ + ( + CoreIndex(0), + vec![Some(ParasEntry::new( + Assignment::new(chain_a), + // At block 3 + config.on_demand_ttl + 3 + ))] + .into_iter() + .collect() + ), + ( + CoreIndex(1), + vec![Some(ParasEntry::new( + Assignment::new(chain_b), + // At block 3 + config.on_demand_ttl + 3 + ))] + .into_iter() + .collect() + ), + ] + .into_iter() + .collect() + ); }); } diff --git a/runtime/parachains/src/session_info/tests.rs b/runtime/parachains/src/session_info/tests.rs index c4475526d58f..727b7c79fbae 100644 --- a/runtime/parachains/src/session_info/tests.rs +++ b/runtime/parachains/src/session_info/tests.rs @@ -62,7 +62,7 @@ fn run_to_block( fn default_config() -> HostConfiguration { HostConfiguration { - parathread_cores: 1, + on_demand_cores: 1, dispute_period: 2, needed_approvals: 3, ..Default::default() diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index fbf896cdedc5..da8cf7cb5a61 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -27,6 +27,7 @@ use runtime_common::{ }; use runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -1183,7 +1184,11 @@ impl parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl parachains_scheduler::Config for Runtime {} +impl parachains_scheduler::Config for Runtime { + type AssignmentProvider = ParaAssignmentProvider; +} + +impl parachains_assigner_parachains::Config for Runtime {} impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; @@ -1434,6 +1439,7 @@ construct_runtime! 
{ ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 61, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet} = 64, // Parachain Onboarding Pallets. Start indices at 70 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event} = 70, @@ -1494,6 +1500,8 @@ pub mod migrations { pub type Unreleased = ( pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, + parachains_scheduler::migration::v1::MigrateToV1, + parachains_configuration::migration::v8::MigrateToV8, ); } diff --git a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs index af8e4c111b20..39b0d893edbb 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=polkadot-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=runtime_parachains::configuration // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::configuration +// --chain=polkadot-dev // --header=./file_header.txt -// --output=./runtime/polkadot/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,62 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::configuration::WeightInfo for WeightInfo { - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `443` - // Estimated: `1928` - // Minimum execution time: 13_403_000 picoseconds. - Weight::from_parts(13_933_000, 0) - .saturating_add(Weight::from_parts(0, 1928)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_330_000 picoseconds. + Weight::from_parts(9_663_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `443` - // Estimated: `1928` - // Minimum execution time: 13_210_000 picoseconds. - Weight::from_parts(13_674_000, 0) - .saturating_add(Weight::from_parts(0, 1928)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_155_000 picoseconds. 
+ Weight::from_parts(9_554_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `443` - // Estimated: `1928` - // Minimum execution time: 13_351_000 picoseconds. - Weight::from_parts(13_666_000, 0) - .saturating_add(Weight::from_parts(0, 1928)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_299_000 picoseconds. + Weight::from_parts(9_663_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_hrmp_open_request_ttl() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -114,40 +106,52 @@ impl runtime_parachains::configuration::WeightInfo for Weight::from_parts(2_000_000_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: 
Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `443` - // Estimated: `1928` - // Minimum execution time: 13_299_000 picoseconds. - Weight::from_parts(13_892_000, 0) - .saturating_add(Weight::from_parts(0, 1928)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_130_000 picoseconds. + Weight::from_parts(9_554_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `443` - // Estimated: `1928` - // Minimum execution time: 14_002_000 picoseconds. - Weight::from_parts(14_673_000, 0) - .saturating_add(Weight::from_parts(0, 1928)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 10_177_000 picoseconds. + Weight::from_parts(10_632_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_perbill() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_136_000 picoseconds. 
+ Weight::from_parts(9_487_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index d923437a67e5..a9811872b6af 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -38,6 +38,8 @@ use scale_info::TypeInfo; use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*}; use runtime_parachains::{ + assigner as parachains_assigner, assigner_on_demand as parachains_assigner_on_demand, + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -77,7 +79,7 @@ use sp_runtime::{ Extrinsic as ExtrinsicT, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, KeyTypeId, Perbill, Percent, Permill, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] @@ -879,6 +881,7 @@ pub enum ProxyType { CancelProxy, Auction, Society, + OnDemandOrdering, } impl Default for ProxyType { fn default() -> Self { @@ -965,6 +968,7 @@ impl InstanceFilter for ProxyType { RuntimeCall::Slots { .. } ), ProxyType::Society => matches!(c, RuntimeCall::Society(..)), + ProxyType::OnDemandOrdering => matches!(c, RuntimeCall::OnDemandAssignmentProvider(..)), } } fn is_superset(&self, o: &Self) -> bool { @@ -1095,7 +1099,27 @@ impl parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl parachains_scheduler::Config for Runtime {} +impl parachains_scheduler::Config for Runtime { + type AssignmentProvider = ParaAssignmentProvider; +} + +parameter_types! { + pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); +} + +impl parachains_assigner_on_demand::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; +} + +impl parachains_assigner_parachains::Config for Runtime {} + +impl parachains_assigner::Config for Runtime { + type OnDemandAssignmentProvider = OnDemandAssignmentProvider; + type ParachainsAssignmentProvider = ParachainsAssignmentProvider; +} impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; @@ -1451,6 +1475,9 @@ construct_runtime! { ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 64, + ParaAssignmentProvider: parachains_assigner::{Pallet, Storage} = 65, + OnDemandAssignmentProvider: parachains_assigner_on_demand::{Pallet, Call, Storage, Event} = 66, + ParachainsAssignmentProvider: parachains_assigner_parachains::{Pallet} = 67, // Parachain Onboarding Pallets. Start indices at 70 to leave room. 
Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 70, @@ -1524,6 +1551,8 @@ pub mod migrations { pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, assigned_slots::migration::v1::VersionCheckedMigrateToV1, + parachains_scheduler::migration::v1::MigrateToV1, + parachains_configuration::migration::v8::MigrateToV8, ); } @@ -1583,6 +1612,7 @@ mod benches { [runtime_parachains::initializer, Initializer] [runtime_parachains::paras_inherent, ParaInherent] [runtime_parachains::paras, Paras] + [runtime_parachains::assigner_on_demand, OnDemandAssignmentProvider] // Substrate [pallet_balances, Balances] [pallet_balances, NisCounterpartBalances] diff --git a/runtime/rococo/src/weights/mod.rs b/runtime/rococo/src/weights/mod.rs index 75acfe9a5d64..21558ca3fb90 100644 --- a/runtime/rococo/src/weights/mod.rs +++ b/runtime/rococo/src/weights/mod.rs @@ -48,6 +48,7 @@ pub mod runtime_common_claims; pub mod runtime_common_crowdloan; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; +pub mod runtime_parachains_assigner_on_demand; pub mod runtime_parachains_configuration; pub mod runtime_parachains_disputes; pub mod runtime_parachains_hrmp; diff --git a/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs new file mode 100644 index 000000000000..ac0f05301b48 --- /dev/null +++ b/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -0,0 +1,91 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_parachains::assigner_on_demand` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::assigner_on_demand +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_parachains::assigner_on_demand`. 
+pub struct WeightInfo(PhantomData); +impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { + /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. + fn place_order_keep_alive(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `297 + s * (4 ±0)` + // Estimated: `3762 + s * (4 ±0)` + // Minimum execution time: 33_522_000 picoseconds. + Weight::from_parts(35_436_835, 0) + .saturating_add(Weight::from_parts(0, 3762)) + // Standard Error: 129 + .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } + /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. + fn place_order_allow_death(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `297 + s * (4 ±0)` + // Estimated: `3762 + s * (4 ±0)` + // Minimum execution time: 33_488_000 picoseconds. + Weight::from_parts(34_848_934, 0) + .saturating_add(Weight::from_parts(0, 3762)) + // Standard Error: 143 + .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } +} diff --git a/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/runtime/rococo/src/weights/runtime_parachains_configuration.rs index c44046382d5a..29f387657786 100644 --- a/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -17,24 +17,25 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=rococo-dev // --steps=50 // --repeat=20 -// --pallet=runtime_parachains::configuration // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::configuration +// --chain=rococo-dev // --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,63 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. pub struct WeightInfo(PhantomData); impl runtime_parachains::configuration::WeightInfo for WeightInfo { - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum execution time: 13_097_000 picoseconds. - Weight::from_parts(13_667_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_051_000 picoseconds. 
+ Weight::from_parts(9_496_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum execution time: 13_199_000 picoseconds. - Weight::from_parts(13_400_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_104_000 picoseconds. + Weight::from_parts(9_403_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `397` - // Estimated: `1882` - // Minimum execution time: 12_831_000 picoseconds. - Weight::from_parts(13_151_000, 0) - .saturating_add(Weight::from_parts(0, 1882)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_112_000 picoseconds. 
+ Weight::from_parts(9_495_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_hrmp_open_request_ttl() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -112,40 +106,52 @@ impl runtime_parachains::configuration::WeightInfo for Weight::from_parts(2_000_000_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum execution time: 13_059_000 picoseconds. - Weight::from_parts(13_481_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_011_000 picoseconds. 
+ Weight::from_parts(9_460_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum execution time: 13_764_000 picoseconds. - Weight::from_parts(14_224_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_940_000 picoseconds. + Weight::from_parts(10_288_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_perbill() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_192_000 picoseconds. 
+ Weight::from_parts(9_595_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index d7594e67c12a..b2397299430d 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -25,6 +25,7 @@ use parity_scale_codec::Encode; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use polkadot_runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, initializer as parachains_initializer, @@ -555,7 +556,11 @@ impl parachains_hrmp::Config for Runtime { type WeightInfo = parachains_hrmp::TestWeightInfo; } -impl parachains_scheduler::Config for Runtime {} +impl parachains_assigner_parachains::Config for Runtime {} + +impl parachains_scheduler::Config for Runtime { + type AssignmentProvider = ParaAssignmentProvider; +} impl paras_sudo_wrapper::Config for Runtime {} @@ -697,6 +702,7 @@ construct_runtime! { Xcm: pallet_xcm::{Pallet, Call, Event, Origin}, ParasDisputes: parachains_disputes::{Pallet, Storage, Event}, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned}, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet}, Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 9c322d6b8436..e6fa4afc9388 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -52,6 +52,7 @@ use runtime_common::{ BlockHashCount, BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance, }; use runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -994,7 +995,11 @@ impl parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl parachains_scheduler::Config for Runtime {} +impl parachains_scheduler::Config for Runtime { + type AssignmentProvider = ParaAssignmentProvider; +} + +impl parachains_assigner_parachains::Config for Runtime {} impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; @@ -1221,6 +1226,7 @@ construct_runtime! { ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 52, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 53, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 54, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet, Storage} = 55, // Parachain Onboarding Pallets. Start indices at 60 to leave room. 
Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 60, @@ -1283,6 +1289,8 @@ pub mod migrations { pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, assigned_slots::migration::v1::VersionCheckedMigrateToV1, + parachains_scheduler::migration::v1::MigrateToV1, + parachains_configuration::migration::v8::MigrateToV8, ); } diff --git a/runtime/westend/src/weights/runtime_parachains_configuration.rs b/runtime/westend/src/weights/runtime_parachains_configuration.rs index 60f6f8e214c3..585dc9058f21 100644 --- a/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=runtime_parachains::configuration // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::configuration +// --chain=westend-dev // --header=./file_header.txt -// --output=./runtime/westend/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,56 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::configuration::WeightInfo for WeightInfo { - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_998_000 picoseconds. - Weight::from_parts(10_268_000, 0) + // Minimum execution time: 9_616_000 picoseconds. + Weight::from_parts(9_961_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_851_000 picoseconds. - Weight::from_parts(10_102_000, 0) + // Minimum execution time: 9_587_000 picoseconds. 
+ Weight::from_parts(9_964_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_932_000 picoseconds. - Weight::from_parts(10_248_000, 0) + // Minimum execution time: 9_650_000 picoseconds. + Weight::from_parts(9_960_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_hrmp_open_request_ttl() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -108,34 +106,50 @@ impl runtime_parachains::configuration::WeightInfo for Weight::from_parts(2_000_000_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_804_000 picoseconds. - Weight::from_parts(10_173_000, 0) + // Minimum execution time: 9_545_000 picoseconds. 
+ Weight::from_parts(9_845_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 10_531_000 picoseconds. - Weight::from_parts(10_984_000, 0) + // Minimum execution time: 10_258_000 picoseconds. + Weight::from_parts(10_607_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_perbill() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_502_000 picoseconds. 
+ Weight::from_parts(9_902_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index d7a12ad0723f..6d023489c073 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -23,7 +23,7 @@ zombienet-tests-parachains-smoke-test: - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} - export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG} - - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed + - export COL_IMAGE="docker.io/paritypr/colander:7292" # The collator image is fixed script: - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh --github-remote-dir="${GH_DIR}" diff --git a/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl b/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl index ccc1ea258f52..50fff9e3d597 100644 --- a/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl +++ b/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl @@ -9,7 +9,7 @@ honest-validator-2: reports node_roles is 4 malus-validator-0: reports node_roles is 4 # Parachains should be making progress even if we have up to 1/3 malicious validators. -honest-validator-0: parachain 2000 block height is at least 2 within 180 seconds +honest-validator-0: parachain 2000 block height is at least 2 within 240 seconds honest-validator-1: parachain 2001 block height is at least 2 within 180 seconds honest-validator-2: parachain 2002 block height is at least 2 within 180 seconds diff --git a/zombienet_tests/misc/0003-parathreads.toml b/zombienet_tests/misc/0003-parathreads.toml new file mode 100644 index 000000000000..83b6d39bffb0 --- /dev/null +++ b/zombienet_tests/misc/0003-parathreads.toml @@ -0,0 +1,32 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.nodes]] + name = "alice" + args = [ "--alice", "-lruntime=debug,parachain=trace" ] + + [[relaychain.nodes]] + name = "bob" + args = [ "--bob", "-lruntime=debug,parachain=trace" ] + +[[parachains]] +id = 100 +add_to_genesis = false +register_para = true +onboard_as_parachain = false + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "adder-collator" + args = [ "-lruntime=debug,parachain=trace" ] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl b/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl index 13d0624158f2..b280a198e085 100644 --- a/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl +++ b/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl @@ -3,4 +3,4 @@ Network: ./0001-parachains-smoke-test.toml Creds: config alice: parachain 100 is registered within 225 seconds -alice: parachain 100 block height is at least 10 within 200 seconds +alice: parachain 100 block height is at least 10 within 400 seconds diff --git a/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl b/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl index fec28455f5f2..bcea5aa1646e 100644 --- a/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl +++ 
b/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl
@@ -3,6 +3,6 @@ Network: ./0002-parachains-upgrade-smoke-test.toml
 Creds: config
 
 alice: parachain 100 is registered within 225 seconds
-alice: parachain 100 block height is at least 10 within 400 seconds
+alice: parachain 100 block height is at least 10 within 460 seconds
 alice: parachain 100 perform dummy upgrade within 200 seconds
 alice: parachain 100 block height is at least 14 within 200 seconds
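Editor's note (not part of the patch, for orientation only): the runtime-side change this series repeats is to stop hard-coding parachain assignment in the scheduler and instead point it at an assignment provider. Polkadot, Westend and the test runtime wire the scheduler straight to the lease-based parachains_assigner_parachains pallet, while Rococo additionally layers the on-demand assigner behind a multiplexing ParaAssignmentProvider. The sketch below condenses the runtime/rococo/src/lib.rs hunks above; `Runtime`, `Balances`, `RuntimeEvent` and the `weights` module are the surrounding runtime items and are assumed to exist, so this excerpt is a recap, not stand-alone compilable code.

// Condensed from the runtime/rococo/src/lib.rs hunks in this patch.
parameter_types! {
	// Default spot-traffic multiplier used to price on-demand orders.
	pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1);
}

// The scheduler now asks a provider for assignments instead of assuming leases.
impl parachains_scheduler::Config for Runtime {
	type AssignmentProvider = ParaAssignmentProvider;
}

// Pay-as-you-go core ordering (on-demand parachains / parathreads).
impl parachains_assigner_on_demand::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type Currency = Balances;
	type TrafficDefaultValue = OnDemandTrafficDefaultValue;
	type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo<Runtime>;
}

// Lease-based parachain assignment stays available unchanged.
impl parachains_assigner_parachains::Config for Runtime {}

// Top-level assigner that multiplexes between the two providers.
impl parachains_assigner::Config for Runtime {
	type OnDemandAssignmentProvider = OnDemandAssignmentProvider;
	type ParachainsAssignmentProvider = ParachainsAssignmentProvider;
}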
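A second orientation note, also not part of the patch: the regenerated weight files above all share the same shape, a constant base cost plus an optional per-item slope, with storage I/O charged separately through `T::DbWeight`. The stand-alone sketch below shows how to read the new `place_order_keep_alive` entry from runtime_parachains_assigner_on_demand.rs; the database costs passed in `main` are hypothetical placeholder values, not the `DbWeight` constants of any of these runtimes.

// Reading an autogenerated weight entry, using the numbers benchmarked
// above for `place_order_keep_alive(s)`:
//   ref_time   = 35_436_835 ps + 14_041 ps * s + 3 db reads + 1 db write
//   proof_size = 3_762 bytes + 4 bytes * s
// where `s` is the current on-demand queue length (benchmarked for 1..=9999).
fn place_order_keep_alive_ref_time_ps(s: u64, db_read_ps: u64, db_write_ps: u64) -> u64 {
	35_436_835 + 14_041 * s + 3 * db_read_ps + db_write_ps
}

fn main() {
	// Hypothetical per-operation DB costs, only to make the example concrete.
	let (db_read_ps, db_write_ps) = (25_000_000, 100_000_000);
	let ps = place_order_keep_alive_ref_time_ps(1_000, db_read_ps, db_write_ps);
	println!("~{:.3} ms of ref_time for a queue of 1000 orders", ps as f64 / 1e9);
}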