From 4ad5f0bd62f778a304d0932a76c9d6de2ec4d289 Mon Sep 17 00:00:00 2001 From: Richard Watts Date: Thu, 14 Nov 2024 15:44:42 +0000 Subject: [PATCH 1/4] (feat) Attempt to infer post-block gaps. (feat) Be more conservative in deleting blocks from the block_store (fix) Remember to remove requests from the in-flight requests queue when we get a response (feat) admin_ mechanisms to draw graphs of node arrangements and interrupt comms (feat) z2 commands to partition the network and dump out node arrangements (feat) A configuration file for zq2-richard (feat) z2 deployer info and command-line options to specify hosts for z2 deployer install and upgrade (feat) Scripts to allow you to easily(ish) run dhat and oprofile on zilliqa processes (feat) A ZQ2_API_URL env var for deposit txns so you can insert mitmweb to work out why they are failing. (feat) List API URLs in z deployer info --- Cargo.lock | 292 +++++++++++++++------ Cargo.toml | 1 + scripts/dhat_zq2 | 5 + scripts/perf_zq2 | 3 + z2/Cargo.toml | 5 +- z2/docs/testing.md | 35 +++ z2/resources/chain-specs/zq2-richard.toml | 17 ++ z2/src/bin/z2.rs | 66 ++++- z2/src/chain.rs | 2 +- z2/src/chain/config.rs | 10 +- z2/src/chain/node.rs | 7 +- z2/src/deployer.rs | 58 ++++- z2/src/lib.rs | 1 + z2/src/node_spec.rs | 6 +- z2/src/plumbing.rs | 58 ++++- z2/src/setup.rs | 66 +++-- z2/src/testing.rs | 228 +++++++++++++++++ z2/src/utils.rs | 13 + z2/src/validators.rs | 3 +- z2/src/zq2.rs | 43 ++-- zilliqa/src/api/admin.rs | 49 +++- zilliqa/src/api/debug.rs | 1 + zilliqa/src/block_store.rs | 293 +++++++++++++++++++--- zilliqa/src/cfg.rs | 8 +- zilliqa/src/consensus.rs | 52 +++- zilliqa/src/db.rs | 27 +- zilliqa/src/director.rs | 33 +++ zilliqa/src/lib.rs | 1 + zilliqa/src/node.rs | 19 ++ zq2-richard.yaml | 2 +- 30 files changed, 1236 insertions(+), 168 deletions(-) create mode 100755 scripts/dhat_zq2 create mode 100755 scripts/perf_zq2 create mode 100644 z2/docs/testing.md create mode 100644 z2/resources/chain-specs/zq2-richard.toml create mode 100644 z2/src/testing.rs create mode 100644 zilliqa/src/api/debug.rs create mode 100644 zilliqa/src/director.rs diff --git a/Cargo.lock b/Cargo.lock index 884cb8a8e..796c95933 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4044,16 +4044,18 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.14.3" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http 0.2.12", - "hyper 0.14.30", + "http 1.1.0", + "http-body-util", + "hyper 1.5.0", + "hyper-util", "log", "rand", "tokio", @@ -4701,33 +4703,55 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libp2p" version = "0.54.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" dependencies = [ "bytes", "either", "futures", "futures-timer", "getrandom", - "libp2p-allow-block-list", + "libp2p-allow-block-list 0.4.1", "libp2p-autonat", - "libp2p-connection-limits", - "libp2p-core", + "libp2p-connection-limits 0.4.0", + "libp2p-core 0.42.0", "libp2p-dns", "libp2p-gossipsub", - "libp2p-identify", + "libp2p-identify 0.45.1", "libp2p-identity", "libp2p-kad", "libp2p-mdns", - "libp2p-metrics", + "libp2p-metrics 0.15.0", "libp2p-quic", "libp2p-request-response", - "libp2p-swarm", + 
"libp2p-swarm 0.45.2", "libp2p-tcp", "libp2p-upnp", "multiaddr", "pin-project", - "rw-stream-sink", + "rw-stream-sink 0.4.0", + "thiserror 1.0.69", +] + +[[package]] +name = "libp2p" +version = "0.54.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom", + "libp2p-allow-block-list 0.4.0", + "libp2p-connection-limits 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-identify 0.45.0", + "libp2p-identity", + "libp2p-metrics 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-swarm 0.45.1", + "multiaddr", + "pin-project", + "rw-stream-sink 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "thiserror 1.0.69", ] @@ -4737,17 +4761,25 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "libp2p-core", + "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", + "void", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.4.1" +dependencies = [ + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.2", "void", ] [[package]] name = "libp2p-autonat" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a083675f189803d0682a2726131628e808144911dad076858bfbe30b13065499" +version = "0.13.1" dependencies = [ "async-trait", "asynchronous-codec", @@ -4756,12 +4788,12 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-request-response", - "libp2p-swarm", + "libp2p-swarm 0.45.2", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.3.1", "rand", "rand_core", "thiserror 1.0.69", @@ -4770,18 +4802,55 @@ dependencies = [ "web-time", ] +[[package]] +name = "libp2p-connection-limits" +version = "0.4.0" +dependencies = [ + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.2", + "void", +] + [[package]] name = "libp2p-connection-limits" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ - "libp2p-core", + "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] +[[package]] +name = "libp2p-core" +version = "0.42.0" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select 0.13.0", + "once_cell", + "parking_lot 0.12.3", + "pin-project", + "quick-protobuf", + "rand", + "rw-stream-sink 0.4.0", + "serde", + "smallvec", + "thiserror 1.0.69", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", +] + [[package]] name = "libp2p-core" version = "0.42.0" @@ -4795,14 +4864,13 @@ dependencies = [ "libp2p-identity", "multiaddr", "multihash", - "multistream-select", + "multistream-select 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "once_cell", "parking_lot 0.12.3", "pin-project", "quick-protobuf", "rand", - "rw-stream-sink", - "serde", + "rw-stream-sink 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", "smallvec", "thiserror 1.0.69", "tracing", @@ -4814,13 +4882,11 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "parking_lot 0.12.3", "smallvec", @@ -4829,9 +4895,7 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" +version = "0.47.1" dependencies = [ "asynchronous-codec", "base64 0.22.1", @@ -4843,12 +4907,12 @@ dependencies = [ "futures-ticker", "getrandom", "hex_fmt", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.2", "prometheus-client", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.3.1", "rand", "regex", "serde", @@ -4870,12 +4934,33 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "lru", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec", + "thiserror 1.0.69", + "tracing", + "void", +] + +[[package]] +name = "libp2p-identify" +version = "0.45.1" +dependencies = [ + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.2", + "lru", + "quick-protobuf", + "quick-protobuf-codec 0.3.1", "smallvec", "thiserror 1.0.69", "tracing", @@ -4903,9 +4988,7 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.46.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" +version = "0.47.0" dependencies = [ "arrayvec", "asynchronous-codec", @@ -4915,11 +4998,11 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.2", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.3.1", "rand", "serde", "sha2", @@ -4934,16 +5017,14 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" dependencies = [ "data-encoding", "futures", "hickory-proto", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.2", "rand", "smallvec", "socket2", @@ -4955,16 +5036,30 @@ dependencies = [ [[package]] name = "libp2p-metrics" version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-gossipsub", - "libp2p-identify", + "libp2p-identify 0.45.1", "libp2p-identity", "libp2p-kad", - "libp2p-swarm", + "libp2p-swarm 0.45.2", + "pin-project", + "prometheus-client", + "web-time", +] + +[[package]] +name = "libp2p-metrics" +version = "0.15.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +dependencies = [ + "futures", + "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-identify 0.45.0", + "libp2p-identity", + "libp2p-swarm 0.45.1", "pin-project", "prometheus-client", "web-time", @@ -4973,14 +5068,12 @@ dependencies = [ [[package]] name = "libp2p-quic" version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-tls", "parking_lot 0.12.3", @@ -4997,17 +5090,15 @@ dependencies = [ [[package]] name = "libp2p-request-response" version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" dependencies = [ "async-trait", "cbor4ii", "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.2", "rand", "serde", "smallvec", @@ -5026,11 +5117,31 @@ dependencies = [ "fnv", "futures", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-identity", + "lru", + "multistream-select 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "once_cell", + "rand", + "smallvec", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-swarm" +version = "0.45.2" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-swarm-derive", "lru", - "multistream-select", + "multistream-select 0.13.0", "once_cell", "rand", "smallvec", @@ -5043,8 +5154,6 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -5055,14 +5164,12 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "socket2", "tokio", @@ -5072,12 +5179,10 @@ dependencies = [ [[package]] name = "libp2p-tls" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "rcgen", "ring 0.17.8", @@ -5090,15 +5195,13 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +version = "0.3.1" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", - "libp2p-swarm", + "libp2p-core 0.42.0", + "libp2p-swarm 0.45.2", "tokio", "tracing", "void", @@ -5381,6 +5484,18 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" 
+[[package]]
+name = "multistream-select"
+version = "0.13.0"
+dependencies = [
+ "bytes",
+ "futures",
+ "pin-project",
+ "smallvec",
+ "tracing",
+ "unsigned-varint 0.8.0",
+]
+
 [[package]]
 name = "multistream-select"
 version = "0.13.0"
@@ -6504,6 +6619,17 @@ dependencies = [
  "byteorder",
 ]
 
+[[package]]
+name = "quick-protobuf-codec"
+version = "0.3.1"
+dependencies = [
+ "asynchronous-codec",
+ "bytes",
+ "quick-protobuf",
+ "thiserror 1.0.69",
+ "unsigned-varint 0.8.0",
+]
+
 [[package]]
 name = "quick-protobuf-codec"
 version = "0.3.1"
@@ -7315,6 +7441,15 @@ dependencies = [
  "wait-timeout",
 ]
 
+[[package]]
+name = "rw-stream-sink"
+version = "0.4.0"
+dependencies = [
+ "futures",
+ "pin-project",
+ "static_assertions",
+]
+
 [[package]]
 name = "rw-stream-sink"
 version = "0.4.0"
@@ -9666,12 +9801,13 @@ dependencies = [
  "git2",
  "hex",
  "home",
+ "hyper 1.5.0",
  "indicatif",
  "itertools 0.13.0",
  "jsonrpsee 0.22.5",
  "k256",
  "lazy_static",
- "libp2p",
+ "libp2p 0.54.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "log",
  "octocrab",
  "primitive-types",
@@ -9696,6 +9832,8 @@ dependencies = [
  "tokio",
  "tokio-stream",
  "toml",
+ "tower 0.5.1",
+ "tower-http",
  "tracing",
  "tracing-subscriber",
  "url",
@@ -9820,7 +9958,7 @@ dependencies = [
  "itertools 0.13.0",
  "jsonrpsee 0.24.4",
  "k256",
- "libp2p",
+ "libp2p 0.54.1",
  "lru",
  "lru-mem",
  "lz4",
diff --git a/Cargo.toml b/Cargo.toml
index 1f492e551..6f736eb82 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,6 +19,7 @@ license = "MIT OR Apache-2.0"
 # Makes flamegraphs more readable.
 # https://doc.rust-lang.org/cargo/reference/manifest.html#the-profile-sections
 debug = true
+# Set lto to false when collecting profiling data.
 lto = "thin"
 
 [profile.release-stripped]
diff --git a/scripts/dhat_zq2 b/scripts/dhat_zq2
new file mode 100755
index 000000000..525fc0eaf
--- /dev/null
+++ b/scripts/dhat_zq2
@@ -0,0 +1,5 @@
+#! /bin/bash
+# Use this as the value of ZQ2_SCRIPT to run zilliqa under valgrind's dhat
+# You will also want to set [profile.release] debug = 1 in Cargo.toml
+# Args passed are (binary) (rest)
+valgrind --tool=dhat "$@"
diff --git a/scripts/perf_zq2 b/scripts/perf_zq2
new file mode 100755
index 000000000..71213094d
--- /dev/null
+++ b/scripts/perf_zq2
@@ -0,0 +1,3 @@
+#! /bin/bash
+# Use this as the value of ZQ2_SCRIPT to run perf
+perf record --call-graph dwarf -- "$@"
diff --git a/z2/Cargo.toml b/z2/Cargo.toml
index 3b74309d9..047556b81 100644
--- a/z2/Cargo.toml
+++ b/z2/Cargo.toml
@@ -42,6 +42,7 @@ git2 = "0.18.3"
 hex = "0.4.3"
 home = "0.5.9"
 indicatif = "0.17.9"
+hyper = {version = "1.5.0", features = [ "client" ] }
 itertools = "0.13.0"
 jsonrpsee = {version = "0.22.4", features = ["client"]}
 k256 = "0.13.4"
@@ -66,9 +67,11 @@ sha3 = "0.10.8"
 tempfile = "3.14.0"
 tera = "1.19.1"
 thiserror = "2.0.3"
-tokio = {version = "1.41.1", features = ["macros", "rt-multi-thread", "sync", "io-std", "io-util", "process", "fs"]}
+tokio = {version = "1.41.1", features = ["macros", "rt-multi-thread", "sync", "io-std", "io-util", "process", "fs", "time"]}
 tokio-stream = "0.1.16"
 toml = "0.8.19"
+tower = "0.5.1"
+tower-http = "0.6.1"
 tracing = "0.1.40"
 tracing-subscriber = "0.3.18"
 url = "2.5.3"
diff --git a/z2/docs/testing.md b/z2/docs/testing.md
new file mode 100644
index 000000000..4c6b36c1c
--- /dev/null
+++ b/z2/docs/testing.md
@@ -0,0 +1,35 @@
+# Testing
+
+There is now some test framework in z2. After starting a z2 network running in the context directory `/tmp/a`, you can...
+
+```sh
+./scripts/z2 test /tmp/a partition 0:0/30000 1-2:2000/30000 2-5:1-3:1000/20000
+```
+
+This tells the system to call the admin API to partition the network:
+
+ * With node 0 being told to talk just to itself from t=0ms to t=30000ms.
+ * With nodes 1 and 2 being told to talk to just 1 and 2 from t=2000ms to t=30000ms.
+ * With nodes 2-5 being told to talk to nodes 1-3 from t=1000ms to t=20000ms.
+
+We do this by calling `admin_whitelist` at appropriate times. The code is in `testing.rs`.
+
+You can also see what the nodes think of the chain:
+
+```sh
+./scripts/z2 test /tmp/a graphs xc viewmin-viewmax 1-2,3
+```
+
+ * In the context `/tmp/a`
+ * `graphs` - draw graphs
+ * With names `/tmp/xc_<node>.dot` (one file per node)
+ * From `viewmin` to `viewmax` (see below) inclusive.
+ * On nodes `1-2,3`
+
+`viewmax==0` means "the latest view": `viewmin>0, viewmax==0` draws from `viewmin`
+up to the latest view, and if both are 0 the most recent views are drawn.
+Otherwise they are a range of views to visualise.
+
+Chrome seems to be the best way to view SVGs these days, so we
+convert the dotfiles written by the `admin` API to SVG with `dot`
+(which you need to have installed) and then output the URLs.
+
diff --git a/z2/resources/chain-specs/zq2-richard.toml b/z2/resources/chain-specs/zq2-richard.toml
new file mode 100644
index 000000000..8c8d7308c
--- /dev/null
+++ b/z2/resources/chain-specs/zq2-richard.toml
@@ -0,0 +1,17 @@
+p2p_port = 3333
+bootstrap_address = [ "12D3KooWACUuqbMYRddTh34HejKg8i1QyuPJoffWVecYotCi8FzZ", "/ip4/34.87.179.185/udp/3333/quic-v1" ]
+
+[[nodes]]
+eth_chain_id = 33469
+allowed_timestamp_skew = { secs = 60, nanos = 0 }
+data_dir = "/data"
+consensus.genesis_accounts = [ ["0xed4Ec243b08456404F37CFA9a09DFdF6a52137F1", "20_800_000_000_000_000_000_000_000_000" ] ]
+consensus.genesis_deposits = [ ["a81a31aaf946111bbe9a958cd9b8bd85d277b8b7c64fc67f579696dbcb6a460a96d4f70e0187064cda83a74b32b1f81f", "12D3KooWACUuqbMYRddTh34HejKg8i1QyuPJoffWVecYotCi8FzZ", "100_000_000_000_000_000_000_000_000", "0xed4Ec243b08456404F37CFA9a09DFdF6a52137F1", "0xed4Ec243b08456404F37CFA9a09DFdF6a52137F1"] ]
+
+# Reward parameters
+consensus.rewards_per_hour = "51_000_000_000_000_000_000_000"
+consensus.blocks_per_hour = 3600
+consensus.minimum_stake = "10_000_000_000_000_000_000_000_000"
+# Gas parameters
+consensus.eth_block_gas_limit = 84000000
+consensus.gas_price = "4_761_904_800_000"
diff --git a/z2/src/bin/z2.rs b/z2/src/bin/z2.rs
index cd7f8f931..e5da91681 100644
--- a/z2/src/bin/z2.rs
+++ b/z2/src/bin/z2.rs
@@ -37,6 +37,7 @@ enum Commands {
     Perf(PerfStruct),
     #[clap(subcommand)]
     /// Group of subcommands to deploy and configure a Zilliqa 2 network
+    /// If you define the environment variable ZQ2_API_URL, we will use it in preference to the default API URL for this network.
     Deployer(DeployerCommands),
     #[clap(subcommand)]
     /// Convert Zilliqa 1 to Zilliqa 2 persistence
@@ -57,6 +58,8 @@
     Nodes(NodesStruct),
     /// Start a node and join it to a network
     JoinNode(JoinNodeStruct),
+    /// Run various tests on the chain
+    Test(TestStruct),
 }
 
 #[derive(Subcommand, Debug)]
@@ -104,6 +107,8 @@ enum DeployerCommands {
     GeneratePrivateKeys(DeployerGenerateActionsArgs),
     /// Generate the genesis key. --force to replace if already existing
     GenerateGenesisKey(DeployerGenerateGenesisArgs),
+    /// Get info about the network's nodes and API URLs
+    Info(DeployerInfoArgs),
 }
 
 #[derive(Args, Debug)]
@@ -144,6 +149,9 @@ pub struct DeployerInstallArgs {
     /// gsutil URI of the persistence file. Ie. gs://my-bucket/my-file
     #[clap(long)]
     persistence_url: Option<String>,
+    /// Machines to install
+    #[clap(long, num_args = 0..)]
+    machines: Vec<String>,
 }
 
 #[derive(Args, Debug)]
@@ -156,6 +164,9 @@ pub struct DeployerUpgradeArgs {
     /// Define the number of nodes to process in parallel. Default: 1
     #[clap(long)]
     max_parallel: Option<usize>,
+    /// Machines to upgrade
+    #[clap(long, num_args = 0..)]
+    machines: Vec<String>,
 }
 
 #[derive(Args, Debug)]
@@ -224,6 +235,11 @@ pub struct DeployerGenerateGenesisArgs {
     force: bool,
 }
 
+#[derive(Args, Debug)]
+pub struct DeployerInfoArgs {
+    config_file: String,
+}
+
 #[derive(Subcommand, Debug)]
 enum ConverterCommands {
     /// Convert Zilliqa 1 to Zilliqa 2 persistence format.
@@ -300,6 +316,23 @@ struct DocStruct {
     api_url: Option<String>,
 }
 
+#[derive(Args, Debug)]
+struct TestStruct {
+    config_dir: String,
+    #[clap(long)]
+    #[clap(default_value = "warn")]
+    log_level: LogLevel,
+
+    #[clap(long)]
+    debug_modules: Vec<String>,
+
+    #[clap(long)]
+    trace_modules: Vec<String>,
+
+    #[arg(trailing_var_arg = true, hide = true)]
+    rest: Vec<String>,
+}
+
 // See https://jwodder.github.io/kbits/posts/clap-bool-negate/
 #[derive(Args, Debug)]
 struct RunStruct {
@@ -743,6 +776,7 @@ async fn main() -> Result<()> {
                     arg.select,
                     arg.max_parallel,
                     arg.persistence_url.clone(),
+                    &arg.machines,
                 )
                 .await
                 .map_err(|err| {
@@ -756,11 +790,16 @@
                         "Provide a configuration file. [--config-file] mandatory argument"
                     )
                 })?;
-                plumbing::run_deployer_upgrade(&config_file, arg.select, arg.max_parallel)
-                    .await
-                    .map_err(|err| {
-                        anyhow::anyhow!("Failed to run deployer upgrade command: {}", err)
-                    })?;
+                plumbing::run_deployer_upgrade(
+                    &config_file,
+                    arg.select,
+                    arg.max_parallel,
+                    &arg.machines,
+                )
+                .await
+                .map_err(|err| {
+                    anyhow::anyhow!("Failed to run deployer upgrade command: {}", err)
+                })?;
                 Ok(())
             }
             DeployerCommands::GetConfigFile(ref arg) => {
@@ -917,6 +956,14 @@
                 })?;
                 Ok(())
             }
+            DeployerCommands::Info(ref arg) => {
+                plumbing::run_deployer_info(&arg.config_file)
+                    .await
+                    .map_err(|err| {
+                        anyhow::anyhow!("Failed to run deployer info command: {}", err)
+                    })?;
+                Ok(())
+            }
         },
         Commands::Converter(converter_command) => match &converter_command {
             ConverterCommands::Convert(ref arg) => {
@@ -1067,5 +1114,14 @@
                 .await?;
             Ok(())
         }
+        Commands::Test(ref arg) => {
+            let log_spec = utils::compute_log_string(
+                &arg.log_level.to_string(),
+                &arg.debug_modules,
+                &arg.trace_modules,
+            )?;
+            plumbing::test(&arg.config_dir, &base_dir, &log_spec, false, &arg.rest).await?;
+            Ok(())
+        }
     }
 }
diff --git a/z2/src/chain.rs b/z2/src/chain.rs
index c158ccbdc..5b861edef 100644
--- a/z2/src/chain.rs
+++ b/z2/src/chain.rs
@@ -47,7 +47,7 @@ impl Chain {
 
     pub fn get_toml_contents(chain_name: &str) -> Result<&'static str> {
         match chain_name {
-            "zq2-richard" => Err(anyhow!("Configuration file for {} not found", chain_name)),
+            "zq2-richard" => Ok(include_str!("../resources/chain-specs/zq2-richard.toml")),
             "zq2-uccbtest" => Ok(include_str!("../resources/chain-specs/zq2-uccbtest.toml")),
             "zq2-infratest" => Err(anyhow!("Configuration file for {} not found", chain_name)),
             "zq2-perftest" => Ok(include_str!("../resources/chain-specs/zq2-perftest.toml")),
diff --git a/z2/src/chain/config.rs b/z2/src/chain/config.rs
index 6bf0923e5..c4f0ef5f8 100644
--- a/z2/src/chain/config.rs
+++ b/z2/src/chain/config.rs
@@ -8,11 +8,11 @@ use crate::github;
 
 #[derive(Debug, Clone, Deserialize, Serialize)]
 pub struct NetworkConfig {
-    pub(super) name: String,
-    pub(super) eth_chain_id: u64,
-
pub(super) project_id: String, - pub(super) roles: Vec, - pub(super) versions: HashMap, + pub name: String, + pub eth_chain_id: u64, + pub project_id: String, + pub roles: Vec, + pub versions: HashMap, } impl NetworkConfig { diff --git a/z2/src/chain/node.rs b/z2/src/chain/node.rs index 1c81a8720..824218155 100644 --- a/z2/src/chain/node.rs +++ b/z2/src/chain/node.rs @@ -111,7 +111,7 @@ impl fmt::Display for NodeRole { } } -#[derive(Clone, Debug)] +#[derive(Clone, Serialize, Deserialize, Debug)] pub struct Machine { pub project_id: String, pub zone: String, @@ -386,7 +386,8 @@ impl ChainNode { private_key.value().await? } else { return Err(anyhow!( - "Found multiple private keys for the instance {}", + "Found {} private keys for the instance {}", + private_keys.len(), &self.machine.name )); }; @@ -764,7 +765,7 @@ pub async fn retrieve_secret_by_role( .await } -async fn retrieve_secret_by_node_name( +pub async fn retrieve_secret_by_node_name( chain_name: &str, project_id: &str, node_name: &str, diff --git a/z2/src/deployer.rs b/z2/src/deployer.rs index c97c97c04..f52b2193a 100644 --- a/z2/src/deployer.rs +++ b/z2/src/deployer.rs @@ -1,21 +1,42 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap, str::FromStr, sync::Arc}; use anyhow::{anyhow, Result}; use cliclack::MultiProgress; use colored::Colorize; +use serde::{Deserialize, Serialize}; use tokio::{fs, sync::Semaphore, task}; use crate::{ address::EthereumAddress, + chain::Chain, chain::{ config::NetworkConfig, instance::ChainInstance, - node::{ChainNode, NodeRole}, + node::{self, ChainNode, NodeRole}, }, secret::Secret, validators, }; +#[derive(Serialize, Deserialize, Clone)] +pub struct MachineChainInfo { + pub zone: String, + pub name: String, + pub external_address: String, + pub peer_id: String, + pub p2p: String, + pub rpc: String, + pub labels: BTreeMap, +} + +// Serializable for easy printing. 
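+// A sketch of the intended round-trip (serde_yaml is already a z2 dependency;
+// the call site is run_deployer_info() in plumbing.rs, and the config path
+// here is illustrative):
+//
+//     let info = deployer::info("zq2-richard.yaml").await?;
+//     println!("{}", serde_yaml::to_string(&info)?);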
+#[derive(Serialize, Deserialize, Clone)] +pub struct ChainInfo { + pub config: NetworkConfig, + pub machines: Vec, + pub rpc: Option, +} + pub async fn new( network_name: &str, eth_chain_id: u64, @@ -42,6 +63,7 @@ pub async fn install_or_upgrade( node_selection: bool, max_parallel: usize, persistence_url: Option, + machines: &Vec, ) -> Result<()> { let config = NetworkConfig::from_file(config_file).await?; let mut chain = ChainInstance::new(config).await?; @@ -52,7 +74,9 @@ pub async fn install_or_upgrade( .map(|n| n.name().clone()) .collect::>(); - let selected_machines = if !node_selection { + let selected_machines = if !machines.is_empty() { + machines.clone() + } else if !node_selection { node_names } else { let mut multi_select = cliclack::multiselect(format!( @@ -887,3 +911,31 @@ async fn generate_secret( Ok(()) } + +pub async fn info(config_file: &str) -> Result { + let config = NetworkConfig::from_file(config_file).await?; + let instance = ChainInstance::new(config.clone()).await?; + let chain = Chain::from_str(&instance.name()).unwrap(); + let mut info = ChainInfo { + config: config.clone(), + machines: Vec::new(), + rpc: chain.get_endpoint().map(|x| x.to_string()), + }; + for m in instance.machines().iter() { + let private_keys = + node::retrieve_secret_by_node_name(&config.name, &config.project_id, &m.name).await?; + if let Some(key) = private_keys.first() { + let address = EthereumAddress::from_private_key(&key.value().await?)?; + info.machines.push(MachineChainInfo { + zone: m.zone.to_string(), + name: m.name.to_string(), + external_address: m.external_address.to_string(), + peer_id: address.peer_id, + p2p: format!("/ip4/{0}/udp/3333/quic-v1", m.external_address), + rpc: format!("http://{0}:4201/", m.external_address), + labels: m.labels.clone(), + }) + } + } + Ok(info) +} diff --git a/z2/src/lib.rs b/z2/src/lib.rs index 712576bb3..10f998a16 100644 --- a/z2/src/lib.rs +++ b/z2/src/lib.rs @@ -19,6 +19,7 @@ pub mod scilla; mod secret; pub mod setup; pub mod spout; +pub mod testing; pub mod utils; pub mod validators; pub mod zq1; diff --git a/z2/src/node_spec.rs b/z2/src/node_spec.rs index 7db852608..5017ab5e0 100644 --- a/z2/src/node_spec.rs +++ b/z2/src/node_spec.rs @@ -37,7 +37,7 @@ impl fmt::Display for Composition { } } -fn indices_from_string(input: &str) -> Result> { +pub fn indices_from_string(input: &str) -> Result> { // We support a-b and a,b,c . let components = input.split(','); let mut result = RangeMap::new(); @@ -72,6 +72,10 @@ impl Composition { Ok(Self { nodes }) } + pub fn all_nodes(&self) -> HashSet { + self.nodes.keys().cloned().collect::>() + } + pub fn single_node(is_validator: bool) -> Self { let mut nodes = HashMap::new(); nodes.insert(0, NodeDesc { is_validator }); diff --git a/z2/src/plumbing.rs b/z2/src/plumbing.rs index a874e8c1a..04f06d23e 100644 --- a/z2/src/plumbing.rs +++ b/z2/src/plumbing.rs @@ -15,9 +15,9 @@ use zilliqa::crypto::SecretKey; use crate::{ chain, chain::node::NodeRole, - kpi, + kpi, node_spec, node_spec::{Composition, NodeSpec}, - utils, validators, + testing, utils, validators, }; const DEFAULT_API_URL: &str = "https://api.zq2-devnet.zilliqa.com"; @@ -176,6 +176,7 @@ pub async fn run_deployer_install( node_selection: bool, max_parallel: Option, persistence_url: Option, + machines: &Vec, ) -> Result<()> { println!("🦆 Installing {config_file} .. 
"); deployer::install_or_upgrade( @@ -184,6 +185,7 @@ pub async fn run_deployer_install( node_selection, max_parallel.unwrap_or(50), persistence_url, + machines, ) .await?; Ok(()) @@ -193,6 +195,7 @@ pub async fn run_deployer_upgrade( config_file: &str, node_selection: bool, max_parallel: Option, + machines: &Vec, ) -> Result<()> { println!("🦆 Upgrading {config_file} .. "); deployer::install_or_upgrade( @@ -201,6 +204,7 @@ pub async fn run_deployer_upgrade( node_selection, max_parallel.unwrap_or(1), None, + machines, ) .await?; Ok(()) @@ -227,6 +231,13 @@ pub async fn run_deployer_deposit(config_file: &str, node_selection: bool) -> Re Ok(()) } +pub async fn run_deployer_info(config_file: &str) -> Result<()> { + println!("🦆 Getting info for {config_file} .. "); + let result = deployer::info(config_file).await?; + println!("{}", &serde_yaml::to_string(&result)?); + Ok(()) +} + pub async fn run_rpc_call( method: &str, params: &Option, @@ -467,3 +478,46 @@ pub async fn generate_docs( )) } } + +pub async fn test( + config_dir: &str, + base_dir: &str, + log_spec: &str, + watch: bool, + rest: &Vec, +) -> Result<()> { + let mut setup_obj = setup::Setup::load(config_dir, log_spec, base_dir, watch).await?; + if rest.is_empty() { + return Err(anyhow!("No test name specified")); + } + let cmd = &rest[0]; + match cmd.as_str() { + "partition" => { + // The rest of the args are partition sets. + let part = testing::Partition::from_args(&rest[1..], &setup_obj)?; + part.run_with(&mut setup_obj).await?; + Ok(()) + } + "graphs" => { + // Get a range and request graphs + let file_name = rest + .get(1) + .ok_or(anyhow!("You must provide a base filename"))?; + let (min_view, max_view) = if let Some(v) = rest.get(2) { + utils::parse_range(v)? + } else { + // Meaning the last 128 please. + (128, 0) + }; + let indices = if let Some(v) = rest.get(3) { + Some(node_spec::indices_from_string(v)?) + } else { + None + }; + + testing::dump_graphs(&setup_obj, file_name, indices, min_view, max_view).await?; + Ok(()) + } + _ => Err(anyhow!(format!("No test type {0}", cmd))), + } +} diff --git a/z2/src/setup.rs b/z2/src/setup.rs index 1e412fd4a..4f9d48d86 100644 --- a/z2/src/setup.rs +++ b/z2/src/setup.rs @@ -1,17 +1,18 @@ -use std::{ - collections::HashMap, - path::{Path, PathBuf}, - str::FromStr, -}; - +use crate::address::EthereumAddress; use alloy::{ primitives::{address, Address}, signers::local::LocalSigner, }; use anyhow::{anyhow, Context, Result}; use k256::ecdsa::SigningKey; +use libp2p::{Multiaddr, PeerId}; use serde::{Deserialize, Serialize}; use serde_yaml; +use std::{ + collections::HashMap, + path::{Path, PathBuf}, + str::FromStr, +}; use tera::Tera; use tokio::fs; use zilliqa::{ @@ -41,6 +42,7 @@ use crate::{ node_spec::Composition, scilla, utils, validators, }; +use core::net::Ipv4Addr; const GENESIS_DEPOSIT: u128 = 10000000000000000000000000; const DATADIR_PREFIX: &str = "z2_node_"; @@ -274,6 +276,21 @@ impl Setup { index + 201 + self.config.base_port + if proxied { 1000 } else { 0 } } + pub fn get_p2p_port(&self, index: u16) -> u16 { + return self.config.base_port + 301 + index + self.config.base_port; + } + + pub fn get_external_addr(&self, index: u16) -> Result { + Ok(format!("/ip4/127.0.0.1/udp/{0}/quic-v1", self.get_p2p_port(index)).parse()?) 
+    }
+
+    pub fn get_json_rpc_url_for_node(&self, node: u64) -> Result<String> {
+        Ok(format!(
+            "http://localhost:{}/",
+            self.get_json_rpc_port(u64::try_into(node)?, false)
+        ))
+    }
+
     pub fn get_scilla_port(&self, index: u16) -> u16 {
         index + self.config.base_port + 500
     }
@@ -314,6 +331,10 @@ impl Setup {
             "🦏 JSON-RPC ports are at {0}+\n",
             self.get_json_rpc_port(0, false)
         ));
+        result.push_str(&format!(
+            "🦏 libp2p ports are at {0}+\n",
+            self.get_p2p_port(0)
+        ));
         result.push_str(&format!(
             "🦏 Scilla ports are at {0}+\n",
             self.get_scilla_port(0)
@@ -398,6 +419,17 @@ impl Setup {
         Ok(())
     }
 
+    pub fn peer_id_for_idx(&self, idx: u64) -> Result<String> {
+        let secret_key = self
+            .config
+            .node_data
+            .get(&idx)
+            .ok_or(anyhow!("No node with index {idx}"))?
+            .secret_key
+            .clone();
+        Ok(EthereumAddress::from_private_key(&secret_key)?.peer_id)
+    }
+
     pub async fn generate_standalone_config(&self) -> Result<()> {
         // The genesis deposits.
         let mut genesis_deposits: Vec = Vec::new();
@@ -430,7 +462,7 @@
                 address!("cb57ec3f064a16cadb36c7c712f4c9fa62b77415"),
                 zilliqa::cfg::Amount(2u128 * ONE_BILLION * ONE_ETH),
             ),
-            // e53d1c3edaffc7a7bab5418eb836cf75819a82872b4a1a0f1c7fcf5c3e020b89
+            // privkey dbcfgfa086b92497c8ed5a4cc6edb3a5bfe3a640c43ffb9fc6aa0873c56f2ee3
             (
                 address!("2ce2dbd623b3c277fae4074f0b2605e624510e20"),
                 zilliqa::cfg::Amount(2u128 * ONE_BILLION * ONE_ETH),
@@ -485,7 +517,7 @@
             .add_raw_template("test_env", include_str!("../resources/test_env.tera.sh"))?;
         let env_str = test_tera
             .render("test_env", &context)
             .context("whilst rendering test_env.tera.sh")?;
         fs::write(test_env_path, &env_str).await?;
     }
@@ -496,17 +528,25 @@
         &self.config_dir
     );
     for (node_index, _node_desc) in self.config.shape.nodes.iter() {
+        let node_index_as_u16 = u64::try_into(*node_index)?;
         println!("🎱 Generating configuration for node {node_index}...");
         let mut cfg = zilliqa::cfg::Config {
             otlp_collector_endpoint: Some("http://localhost:4317".to_string()),
-            bootstrap_address: None,
             nodes: Vec::new(),
-            p2p_port: 0,
-            external_address: None,
+            p2p_port: self.get_p2p_port(node_index_as_u16),
+            external_address: Some(self.get_external_addr(node_index_as_u16)?),
+            bootstrap_address: Some((
+                PeerId::from_str(&self.peer_id_for_idx(0)?)?,
+                self.get_external_addr(0)?,
+            )),
+            listening_subnet: Some((
+                "127.0.0.1".parse::<Ipv4Addr>()?,
+                "255.255.255.0".parse::<Ipv4Addr>()?,
+            )),
         };
         // @todo should pass this in!
         let mut node_config = cfg::NodeConfig {
-            json_rpc_port: self.get_json_rpc_port(u64::try_into(*node_index)?, false),
+            json_rpc_port: self.get_json_rpc_port(node_index_as_u16, false),
             allowed_timestamp_skew: allowed_timestamp_skew_default(),
             data_dir: None,
             state_cache_size: state_cache_size_default(),
@@ -577,10 +617,8 @@
             node_config.state_rpc_limit = usize::try_from(i64::MAX)?;
             node_config.consensus.scilla_stdlib_dir =
                 scilla::Runner::get_scilla_stdlib_dir(&self.base_dir);
-            cfg.nodes = Vec::new();
             cfg.nodes.push(node_config);
-            cfg.p2p_port = 0;
             // Now write the config.
             let config_path = self.get_config_path(*node_index)?;
             println!("🪅 Writing configuration file for node {0} .. ", node_index);
diff --git a/z2/src/testing.rs b/z2/src/testing.rs
new file mode 100644
index 000000000..130e5e594
--- /dev/null
+++ b/z2/src/testing.rs
@@ -0,0 +1,228 @@
+// Code to stress-test z2 networks.
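+//
+// Partition specs (see z2/docs/testing.md) look like, for example:
+//   0:0/30000          - node 0 may talk only to itself from t=0ms to t=30000ms
+//   2-5:1-3:1000/20000 - nodes 2-5 may talk only to nodes 1-3 from t=1000ms to t=20000ms
+// (examples taken from the docs; the timings are illustrative)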
+#![allow(unused_imports)]
+
+use crate::{node_spec, setup::Setup};
+use anyhow::{anyhow, Error, Result};
+use jsonrpsee::core::{client::ClientT, params::ArrayParams};
+use jsonrpsee::http_client::HttpClientBuilder;
+use jsonrpsee::rpc_params;
+use std::cmp::{Ordering, PartialOrd};
+use std::collections::{BinaryHeap, HashSet};
+use tokio::process::Command;
+use tokio::time::{self, Duration, Instant};
+use tower_http::trace::TraceLayer;
+
+// Not very artistic, but it'll do.
+#[derive(Debug)]
+pub struct PartitionEntry {
+    pub nodes_to_talk_to: HashSet<u64>,
+    pub nodes_to_tell: HashSet<u64>,
+    pub start_ms: u64,
+    pub end_ms: u64,
+}
+
+/// A partition test, specified by a list of sets of nodes and time ranges during which we
+/// use the admin interface to whitelist nodes to communicate only with other nodes within
+/// their partition.
+/// It would've been nice to add some view constraints too, but since many of the views will
+/// be identical this would be harder to specify than I want for this code.
+#[derive(Debug)]
+pub struct Partition {
+    entries: Vec<PartitionEntry>,
+}
+
+#[derive(Debug, Eq, PartialEq)]
+struct HeapEntry {
+    idx: usize,
+    is_start: bool,
+    when_ms: u64,
+}
+
+impl Ord for HeapEntry {
+    fn cmp(&self, other: &Self) -> Ordering {
+        // Backwards because BinaryHeap is a max-heap.
+        if self.when_ms > other.when_ms {
+            Ordering::Less
+        } else if self.when_ms < other.when_ms {
+            Ordering::Greater
+        } else {
+            Ordering::Equal
+        }
+    }
+}
+
+impl PartialOrd for HeapEntry {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+fn nodes_from_spec(spec: &str, setup: &Setup) -> Result<HashSet<u64>> {
+    Ok(if spec == "all" {
+        setup
+            .config
+            .node_data
+            .keys()
+            .cloned()
+            .collect::<HashSet<u64>>()
+    } else {
+        node_spec::indices_from_string(spec)?
+    })
+}
+
+impl Partition {
+    pub fn from_args(args: &[String], setup: &Setup) -> Result<Self> {
+        let mut entries: Vec<PartitionEntry> = Vec::new();
+        for arg in args {
+            // Partitions are specified as a comma-separated range of nodes to tell, followed by ':',
+            // followed by the nodes they may talk to (the same set if there is only one ':'),
+            // followed by a start millisecond, followed by '/', followed by an end millisecond,
+            // eg. 0-1:400/500
+            // The special value "all" means all of them.
+            let fields = arg.split(':').collect::<Vec<&str>>();
+            if fields.len() != 2 && fields.len() != 3 {
+                return Err(anyhow!(
+                    "Arg '{arg}' has the wrong number of ':'-separated fields - {0} - should be 2 or 3",
+                    fields.len()
+                ));
+            }
+            let has_nodes_to_tell = fields.len() == 3;
+            let nodes_to_tell = nodes_from_spec(fields[0], setup)?;
+            let nodes_to_talk_to = if has_nodes_to_tell {
+                nodes_from_spec(fields[1], setup)?
+            } else {
+                nodes_to_tell.clone()
+            };
+            let times = (if has_nodes_to_tell {
+                fields[2]
+            } else {
+                fields[1]
+            })
+            .split('/')
+            .collect::<Vec<&str>>();
+            if times.len() != 2 {
+                return Err(anyhow!("Arg '{arg}' - there must be two times, separated by a '/' after the ':' - found {0}", times.len()));
+            }
+            let start_ms = u64::from_str_radix(times[0], 10)?;
+            let end_ms = u64::from_str_radix(times[1], 10)?;
+            entries.push(PartitionEntry {
+                nodes_to_talk_to,
+                nodes_to_tell,
+                start_ms,
+                end_ms,
+            });
+        }
+
+        Ok(Partition { entries })
+    }
+
+    pub async fn run_with(&self, setup: &mut Setup) -> Result<()> {
+        println!("🦒 Running partition test ... ");
+        // This is fairly easily done with a pair of pqueues of indices (indices because the rust priority queue impl wants
+        // an equality relation on items).
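+        // (BinaryHeap is a max-heap, which is why HeapEntry's Ord above is
+        // reversed: after pushing entries with when_ms = 500 and when_ms = 400,
+        // pop() yields the when_ms = 400 entry first.)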
+ // I = (index, is_start) + let mut tasks: BinaryHeap = BinaryHeap::new(); + + self.entries.iter().enumerate().for_each(|(idx, val)| { + tasks.push(HeapEntry { + idx, + is_start: true, + when_ms: val.start_ms, + }); + tasks.push(HeapEntry { + idx, + is_start: false, + when_ms: val.end_ms, + }); + }); + + let start_time = Instant::now(); + loop { + let now = Instant::now(); + if let Some(heap_entry) = tasks.pop() { + let event_happens_at = start_time + Duration::from_millis(heap_entry.when_ms); + if event_happens_at > now { + let to_sleep: Duration = event_happens_at - now; + println!( + "🦒🦒 Waiting {:?} for next event (idx = {}, is_start = {})", + to_sleep, heap_entry.idx, heap_entry.is_start + ); + tokio::time::sleep(to_sleep).await; + } + let mut peer_ids: ArrayParams = ArrayParams::new(); + let mut peer_vec: Vec = Vec::new(); + let entry = &self.entries[heap_entry.idx]; + + if heap_entry.is_start { + // List the peer ids of the elements + for peer in entry.nodes_to_talk_to.iter() { + let id = setup.peer_id_for_idx(*peer)?; + peer_ids.insert(id.to_string())?; + peer_vec.push(id); + } + } + // Otherwise, leave empty and we'll remove the partition. + for peer in entry.nodes_to_tell.iter() { + println!("🦩 admin_whitelist to {peer} for {peer_vec:?}"); + let client = HttpClientBuilder::default() + .build(setup.get_json_rpc_url_for_node(*peer)?)?; + + client + .request::<(), ArrayParams>("admin_whitelist", peer_ids.clone()) + .await?; + } + } else { + println!("🐌 All done"); + break; + } + } + Ok(()) + } +} + +pub async fn dump_graphs( + setup: &Setup, + file_name: &str, + indices: Option>, + min_view: u64, + max_view: u64, +) -> Result<()> { + // OK. Request dumps .. + let indices_to_dump = if let Some(v) = indices { + v + } else { + setup.config.shape.all_nodes() + }; + for idx in &indices_to_dump { + let node_fn = format!("{file_name}_{idx:08}"); + println!("🐑 Dumping graphs for index {idx} to /tmp/{node_fn}.dot"); + let client = HttpClientBuilder::default().build(setup.get_json_rpc_url_for_node(*idx)?)?; + let params = rpc_params![node_fn, format!("{min_view}"), format!("{max_view}")]; + client + .request::<(), ArrayParams>("admin_graphs", params) + .await?; + } + for idx in &indices_to_dump { + let node_fn = format!("{file_name}_{idx:08}"); + println!("🐑 Trying to generate SVG in file:///tmp/{node_fn}.svg"); + // Yuck, but test code (and I'm out of time) + let mut cmd = Command::new("sh"); + cmd.arg("-c"); + cmd.arg(format!("dot /tmp/{node_fn}.dot -Tsvg >/tmp/{node_fn}.svg")); + let result = cmd.spawn(); + if let Ok(mut r) = result { + if let Ok(v) = r.wait().await { + if !v.success() { + println!("🐧🐧🐧 dot failed."); + } + } else { + println!("🐧🐧🐧 Couldn't run dot"); + } + } else { + println!("🐧🐧🐧 Couldn't spawn dot!"); + } + } + + println!("🐐 All done. 
Open your files in Chrome!");
+    Ok(())
+}
diff --git a/z2/src/utils.rs b/z2/src/utils.rs
index adeaf84bf..d8cbe0f65 100644
--- a/z2/src/utils.rs
+++ b/z2/src/utils.rs
@@ -121,3 +121,16 @@ pub fn parse_checkpoint_spec(spec: &str) -> Result {
         })
     }
 }
+
+pub fn parse_range(in_val: &str) -> Result<(u64, u64)> {
+    let mut fields = in_val.split('-');
+    let mut min: u64 = 0;
+    let mut max: u64 = 0;
+    if let Some(mv) = fields.next() {
+        min = mv.parse::<u64>()?;
+    }
+    if let Some(mv) = fields.next() {
+        max = mv.parse::<u64>()?;
+    }
+    Ok((min, max))
+}
diff --git a/z2/src/validators.rs b/z2/src/validators.rs
index 737113e44..1b28e20ad 100644
--- a/z2/src/validators.rs
+++ b/z2/src/validators.rs
@@ -173,7 +173,8 @@ pub async fn deposit_stake(stake: &StakeDeposit) -> Result<()> {
         stake.amount,
         stake.validator.peer_id
     );
-    let network_api = stake.chain_name.get_endpoint().unwrap();
+    let network_api = std::env::var("ZQ2_API_URL")
+        .unwrap_or_else(|_| stake.chain_name.get_endpoint().unwrap().to_string());
 
     let provider = Provider::<Http>::try_from(network_api)?;
     let chain_id = provider.get_chainid().await?;
diff --git a/z2/src/zq2.rs b/z2/src/zq2.rs
index 338fb9cf8..f2607a964 100644
--- a/z2/src/zq2.rs
+++ b/z2/src/zq2.rs
@@ -1,5 +1,6 @@
 use anyhow::Result;
 use futures::future::JoinAll;
+use std::env;
 use tokio::{process::Command, sync::mpsc, task::JoinHandle};
 
 use crate::{
@@ -39,25 +40,33 @@ impl Runner {
         watch: bool,
         channel: &mpsc::Sender,
     ) -> Result {
-        let mut cmd = Command::new("cargo");
         let mut args = Vec::<&str>::new();
+        let prefix_script = env::var("ZQ2_SCRIPT");
+        let mut cmd = if let Ok(val) = prefix_script {
+            let mut cmd = Command::new(val);
+            cmd.args(["target/release/zilliqa", key, "--config-file", config_file]);
+            cmd
+        } else {
+            let mut cmd = Command::new("cargo");
+            let cargo_cmd = &[
+                "run",
+                "--release",
+                "--bin",
+                "zilliqa",
+                "--",
+                key,
+                "--config-file",
+                config_file,
+            ];
+            let joined = cargo_cmd.join(" ").to_string();
+            if watch {
+                args.extend_from_slice(&["watch", "-x", &joined])
+            } else {
+                args.extend_from_slice(cargo_cmd);
+            }
+            cmd.args(args);
+            cmd
+        };
         cmd.current_dir(format!("{base_dir}/zq2"));
         if let Some(val) = debug_spec {
             cmd.env("RUST_LOG", val);
diff --git a/zilliqa/src/api/admin.rs b/zilliqa/src/api/admin.rs
index bfe262fbd..fbecf34a1 100644
--- a/zilliqa/src/api/admin.rs
+++ b/zilliqa/src/api/admin.rs
@@ -1,16 +1,26 @@
 //!
An administrative API +use core::str::FromStr; use std::sync::{Arc, Mutex}; use alloy::eips::BlockNumberOrTag; use anyhow::{anyhow, Result}; use jsonrpsee::{types::Params, RpcModule}; +use libp2p::PeerId; use serde::{Deserialize, Serialize}; +use tracing::*; use crate::{api::to_hex::ToHex, node::Node}; pub fn rpc_module(node: Arc>) -> RpcModule>> { - super::declare_module!(node, [("admin_generateCheckpoint", checkpoint)]) + super::declare_module!( + node, + [ + ("admin_generateCheckpoint", checkpoint), + ("admin_whitelist", whitelist), + ("admin_graphs", graphs) + ] + ) } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -38,3 +48,40 @@ fn checkpoint(params: Params, node: &Arc>) -> Result>) -> Result<()> { + let mut params = params.sequence(); + let mut the_list: Vec = Vec::new(); + let mut node = node.lock().unwrap(); + while let Some(val) = params.optional_next().unwrap() { + the_list.push(PeerId::from_str(val)?); + } + info!("Set whitelist to {the_list:?}"); + node.director.whitelist(if the_list.is_empty() { + None + } else { + Some(the_list) + })?; + Ok(()) +} + +fn graphs(params: Params<'_>, node: &Arc>) -> Result<()> { + let mut params = params.sequence(); + // Make safe. + let filename = params + .next::() + .unwrap() + .chars() + .filter(|x| *x != '.' && *x != '/') + .collect::(); + let min_view = params.next::()?.parse::()?; + let max_view = params.next::()?.parse::()?; + + let full_path = format!("/tmp/{filename}.dot"); + node.lock() + .unwrap() + .consensus + .dump_graphs(&full_path, min_view, max_view)?; + Ok(()) +} diff --git a/zilliqa/src/api/debug.rs b/zilliqa/src/api/debug.rs new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/zilliqa/src/api/debug.rs @@ -0,0 +1 @@ + diff --git a/zilliqa/src/block_store.rs b/zilliqa/src/block_store.rs index 58b8bddf4..31143a69b 100644 --- a/zilliqa/src/block_store.rs +++ b/zilliqa/src/block_store.rs @@ -2,7 +2,7 @@ use std::{ cmp, collections::{BTreeMap, BTreeSet, HashMap, HashSet}, num::NonZeroUsize, - ops::Range, + ops::{Bound, Range}, sync::{Arc, RwLock}, time::Duration, }; @@ -73,6 +73,8 @@ pub struct BlockCache { /// The head cache - this caches /// An index into the cache by parent hash pub by_parent_hash: HashMap>, + /// By hash, because when restarting we need it. + pub by_hash: HashMap>, /// Set associative shift pub shift: usize, /// This is used to count the number of times we've looked for a fork. @@ -96,6 +98,7 @@ impl BlockCache { head_cache: BTreeMap::new(), empty_view_ranges: RangeMap::new(), by_parent_hash: HashMap::new(), + by_hash: HashMap::new(), shift: 8 - constants::BLOCK_CACHE_LOG2_WAYS, fork_counter: 0, max_blocks_in_flight, @@ -103,6 +106,58 @@ impl BlockCache { } } + // Dump out a graphviz dotfile fragment containing the cache with links. + pub fn illustrate(&self, from_db: &Vec) -> Result { + // Print out the structure of the cache (fairly slowly!). + let mut by_hash: HashMap> = HashMap::new(); + let mut result = String::new(); + fn represent_entry(k: u128, v: &BlockCacheEntry) -> String { + format!( + "v#{0}/b#{1}/k#{2} {3}", + v.proposal.view(), + v.proposal.number(), + k, + v.proposal.hash() + ) + } + + for (key, entry) in self.cache.iter().chain(self.head_cache.iter()) { + let hash = entry.proposal.hash(); + by_hash + .entry(hash) + .or_insert_with(|| HashSet::new()) + .insert(*key); + } + + for (key, entry) in self.cache.iter().chain(self.head_cache.iter()) { + // Dump out the node itself. 
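+            // e.g., with hypothetical values, this emits a dot node like:
+            //   c1234[label="Cv#7/b#5/k#1234 <hash>"]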
+ result.push_str(&format!( + "c{0}[label=\"C{1}\"]\n", + key, + represent_entry(*key, entry) + )); + // Now find our parent + let parent_hash = entry.proposal.header.qc.block_hash; + if let Some(p_values) = by_hash.get(&parent_hash) { + for p in p_values.iter() { + result.push_str(&format!("c{0} -> c{1}\n", p, key)); + } + } + } + + // now .. + for h in from_db.iter() { + //result.push_str(&format!("h{0}\n", h)); + if let Some(v) = self.by_parent_hash.get(h) { + for p in v.iter() { + result.push_str(&format!("h{0} -> c{1}\n", h, p)); + } + } + } + result.push_str("\n"); + Ok(result) + } + pub fn key_from_view(&self, peer: &PeerId, view_num: u64) -> u128 { let ways = peer.to_bytes().pop().unwrap_or(0x00); u128::from(ways >> self.shift) | (u128::from(view_num) << self.shift) @@ -124,6 +179,109 @@ impl BlockCache { self.views_expecting_proposals.remove(&view); } + /// There is no way to work out which of the blocks that purport to be + /// the block with hash H we actually want, so return any one and + /// if this is a lie, we'll soon find out. + /// cloning proposals is slow, so we'll take the first here. + pub fn first_proposal_by_hash(&self, hash: &Hash) -> Result> { + // Frustratingly, there appears to be no flat_map(). + Ok(self + .by_hash + .get(hash) + .map(|entry_set: &HashSet| { + entry_set + .iter() + .take(1) + .filter_map(|entry_key| { + self.cache + .get(entry_key) + .or(self.head_cache.get(entry_key)) + .map(|entry| entry.proposal.clone()) + }) + .last() + }) + .flatten()) + } + + /// Find any view gaps known to exist in the cache and add them to the known view gap + /// array so that we don't keep re-requesting them. + pub fn find_known_view_gaps(&mut self) { + // Iterate through the list of blocks we have. If we are ever in the situation where + // the next block has the next number, but not the next view, there must be a gap. + // We can safely allow peers to mislead us here, because we will eventually notice + // (when the sync point gets to the gap) and remove the gap. + let mut last_block: u64 = 0; + let mut max_view_for_current_block: u64 = 0; + + // This measures the maximum received view for the next block, based on the intuition that + // view gaps happen with forks. When a fork happens, it will lead to two blocks at the same + // block number but different view numbers (probably missing a view number in the middle). + // we will see that missing view number as a place where we could have a block and attempt to + // fetch the block in it - except that there isn't one. + // the "max_view_for_next_block" heuristic used here effectively means that if a peer advertises + // block N+1 at view V1, we assume that there are no blocks in the views between block N with + // view V0 and block N+1 with view V1. This may not be true, but it is efficient and prevents + // us rerequesting missing views that aren't really missing - it will also slow down sync + // if the peer was lying, because we will not fetch those views ahead, thinking they don't + // exist, until we realise that the N+1/V1 block we thought we had was invalid and we in + // fact needed the "other" fork, which contains blocks in views we never fetched. + // This seems the lesser of the evils at the moment. It may change later! + // - rrw 2024-11-01 + let mut max_view_for_next_block: Option = None; + //trace!("find_known_view_gaps: finding"); + // Avoids a borrow conflict in the loop... 
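+        // Worked example (values illustrative): with cached entries
+        // (blk 5, view 10), (blk 6, view 13) and (blk 7, view 14), views 11-12
+        // can hold no blocks, so they are recorded here and not re-requested.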
+ let mut empty_view_ranges = self.empty_view_ranges.clone(); + for (k, v) in self.cache.iter().chain(self.head_cache.iter()) { + let view = self.view_from_key(*k); + let block = v.proposal.number(); + //trace!("find_known_view_gaps: view#{view} blk#{block}"); + if block <= last_block { + // The view at last_block must be at least the view in this entry. + max_view_for_current_block = std::cmp::max(max_view_for_current_block, view) + } else if block == last_block + 1 { + // The view at the next block must be at least the view in this entry + max_view_for_next_block = + Some(max_view_for_next_block.map_or(view, |x| std::cmp::max(x, view))); + } else { + // We're at a higher block now. + if last_block != 0 + && max_view_for_next_block.map_or(false, |x| x > max_view_for_current_block + 1) + { + // There's a gap. + empty_view_ranges.with_range(&Range { + start: max_view_for_current_block + 1, + // Safe because the if stmt above would have failed if this had not been ok. + end: max_view_for_next_block.unwrap(), + }); + } + last_block = block; + max_view_for_current_block = view; + max_view_for_next_block = None; + } + } + self.empty_view_ranges = empty_view_ranges; + } + + /// Useful for finding gaps in view numbers. Returns the next (view_number, block number) + /// we know of, >= the given view. + pub fn get_next_block_number_ge_view(&mut self, view: u64) -> Option<(u64, u64)> { + let key = self.min_key_for_view(view); + let mut cache_iter = self + .cache + .range::((Bound::Included(key), Bound::Unbounded)); + if let Some((_, v)) = cache_iter.next() { + return Some((v.proposal.view(), v.proposal.number())); + } + let mut head_iter = self + .head_cache + .range::((Bound::Included(key), Bound::Unbounded)); + if let Some((_, v)) = head_iter.next() { + return Some((v.proposal.view(), v.proposal.number())); + } else { + None + } + } + /// returns the minimum key (view << shift) that we are prepared to store in the head cache. /// keys smaller than this are stored in the main cache. /// We compute this by subtracting a constant from (highest_known_view +1)<< shift - which is @@ -186,11 +344,30 @@ impl BlockCache { pub fn delete_blocks_up_to(&mut self, block_number: u64) { // note that this code embodies the assumption that increasing block number implies // increasing view number. - self.trim_with_fn(|_, v| -> bool { v.proposal.number() <= block_number }); + self.trim_with_fn(|_, v| -> bool { + if v.proposal.number() <= block_number { + trace!( + "[cache] delete cached block with view#{0}/blk#{1}", + v.proposal.view(), + v.proposal.number() + ); + true + } else { + false + } + }); + } + + pub fn last_safe_view(&self, highest_confirmed_view: u64) -> u64 { + if highest_confirmed_view >= 3 { + highest_confirmed_view - 3 + } else { + 0 + } } pub fn trim(&mut self, highest_confirmed_view: u64) { - let lowest_ignored_key = self.min_key_for_view(highest_confirmed_view); + let lowest_ignored_key = self.min_key_for_view(self.last_safe_view(highest_confirmed_view)); debug!( "before trim {highest_confirmed_view} cache has {0:?} expecting {3:?} with {1}/{2}", self.extant_block_ranges(), @@ -217,7 +394,7 @@ impl BlockCache { /// any selector function which is not monotonic in key will not work properly. fn trim_with_fn bool>(&mut self, selector: F) { // We've deleted or replaced this key with this parent hash; remove it from the index. 
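+        // (by_parent_hash and by_hash each map a Hash to the set of cache keys
+        // whose entry has that parent hash / block hash respectively; an entry
+        // inserted under key K appears in both indices, so eviction must unlink
+        // K from both - see insert_with_replacement below.)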
- fn unlink_parent_hash(cache: &mut HashMap>, key: &u128, hash: &Hash) { + fn unlink_hash(cache: &mut HashMap>, key: &u128, hash: &Hash) { let mut do_remove = false; if let Some(val) = cache.get_mut(hash) { val.remove(key); @@ -241,7 +418,8 @@ impl BlockCache { if selector(k, v) { // Kill it! if let Some((k, v)) = cache_ptr.pop_first() { - unlink_parent_hash(&mut self.by_parent_hash, &k, &v.parent_hash); + unlink_hash(&mut self.by_parent_hash, &k, &v.parent_hash); + unlink_hash(&mut self.by_hash, &k, &v.proposal.hash()); }; } else { let view_number = u64::try_from(*k >> shift).unwrap(); @@ -276,7 +454,8 @@ impl BlockCache { } while self.cache.len() > cache_size { if let Some((k, v)) = self.cache.pop_last() { - unlink_parent_hash(&mut self.by_parent_hash, &k, &v.parent_hash); + unlink_hash(&mut self.by_parent_hash, &k, &v.parent_hash); + unlink_hash(&mut self.by_hash, &k, &v.proposal.hash()); } } // Both caches are now at most the "right" number of entries long. @@ -302,24 +481,24 @@ impl BlockCache { fn insert_with_replacement( into: &mut BTreeMap, by_parent_hash: &mut HashMap>, + by_hash: &mut HashMap>, from: &PeerId, parent_hash: &Hash, key: u128, value: Proposal, ) { + let hash = value.hash(); into.insert(key, BlockCacheEntry::new(*parent_hash, *from, value)) .map(|entry| { by_parent_hash .get_mut(&entry.parent_hash) .map(|x| x.remove(&key)) }); - if let Some(v) = by_parent_hash.get_mut(parent_hash) { - v.insert(key); - } else { - let mut new_set = HashSet::new(); - new_set.insert(key); - by_parent_hash.insert(*parent_hash, new_set); - } + by_parent_hash + .entry(*parent_hash) + .or_insert(HashSet::new()) + .insert(key); + by_hash.entry(hash).or_insert(HashSet::new()).insert(key); } if proposal.header.view <= highest_confirmed_view { @@ -332,6 +511,7 @@ impl BlockCache { insert_with_replacement( &mut self.head_cache, &mut self.by_parent_hash, + &mut self.by_hash, from, parent_hash, key, @@ -347,6 +527,7 @@ impl BlockCache { insert_with_replacement( &mut self.cache, &mut self.by_parent_hash, + &mut self.by_hash, from, parent_hash, key, @@ -355,6 +536,8 @@ impl BlockCache { } // Zero the fork counter. self.fork_counter = 0; + // Find known view gaps + self.find_known_view_gaps(); // Now evict the worst entry self.trim(highest_confirmed_view); Ok(()) @@ -473,6 +656,30 @@ impl PeerInfo { } } + /// Clear any pending requests that contain this view. + /// Note that this is only legitimate because there is exactly one + /// BlockResponse per BlockRequest - otherwise, we would rerequest + /// blocks the peer was about to send us and end up in an awful mess. + fn clear_pending_requests_with_view(&mut self, view: u64) -> Result<()> { + let ids = self + .pending_requests + .iter() + .filter_map(|(k, (_, from, to))| { + if view >= *from && view <= *to { + Some(k) + } else { + None + } + }) + .cloned() + .collect::>(); + trace!("clear_pending_requests_with_view(): remove pending requests for {ids:?}"); + ids.iter().for_each(|v| { + self.pending_requests.remove(v); + }); + Ok(()) + } + /// Do we have availability, or should we get it again? 
     fn have_availability(&self) -> bool {
         self.availability_updated_at.is_some()
@@ -703,6 +910,11 @@ impl BlockStore {
             peer.availability.highest_known_view = view;
         }
 
+        // Ack any pending requests with this proposal
+        // (it's fine that other proposals from this response may still be in the queue -
+        // our accounting for pending proposals will ensure that we don't request them again)
+        peer.clear_pending_requests_with_view(view)?;
+
         Ok(())
     }
 
@@ -771,9 +983,12 @@ impl BlockStore {
         // the responses and we'll end up being unable to reconstruct the chain. Not strictly true, because
         // the network will hold some blocks for us, but true enough that I think we ought to treat it as
         // such.
-        let to = cmp::min(
-            current_view + self.max_blocks_in_flight,
-            self.highest_known_view,
+        let to = cmp::max(
+            self.max_blocks_in_flight,
+            cmp::min(
+                current_view + self.max_blocks_in_flight,
+                self.highest_known_view,
+            ),
         );
         trace!("block_store::request_missing_blocks() : requesting blocks {from} to {to}");
         to_request.with_range(&Range {
@@ -981,10 +1196,10 @@ impl BlockStore {
             request,
             requests
         );
-        // Yay!
+        // Yay! Note that to_view is inclusive, not exclusive, so we need to subtract 1.
         let message = ExternalMessage::BlockRequest(BlockRequest {
             from_view: request.start,
-            to_view: request.end,
+            to_view: request.end - 1,
         });
         let request_id = self.message_sender.send_external_message(*peer, message)?;
@@ -1050,6 +1265,13 @@ impl BlockStore {
         self.get_block(hash)
     }
 
+    pub fn get_cached_block_by_hash(&self, hash: &Hash) -> Result<Option<Block>> {
+        Ok(self
+            .buffered
+            .first_proposal_by_hash(hash)?
+            .map(|x| x.into_parts().0))
+    }
+
     pub fn get_highest_canonical_block_number(&self) -> Result<Option<u64>> {
         self.db.get_highest_canonical_block_number()
     }
@@ -1149,17 +1371,6 @@ impl BlockStore {
         // There would be a few easy optimisations if we could eg. assume that forks were max length
         // 1. As it is, I can't think of a clever way to do this, so...
 
-        // In any case, deleting any cached block that calls itself the next block is
-        // the right thing to do - if it really was the next block, we would not be
-        // executing this branch.
-        if let Some(highest_block_number) = self.db.get_highest_canonical_block_number()? {
-            self.buffered.delete_blocks_up_to(highest_block_number + 1);
-            trace!(
-                "block_store::obtain_child_block_candidates : deleted cached blocks up to and including {0}",
-                highest_block_number + 1
-            );
-        }
-
         let fork_elems =
             self.buffered.inc_fork_counter() * (1 + constants::EXAMINE_BLOCKS_PER_FORK_COUNT);
         let parent_hashes = self.db.get_highest_block_hashes(fork_elems)?;
@@ -1172,7 +1383,23 @@ impl BlockStore {
             if !revised.is_empty() {
                 // Found some!
                 self.buffered.reset_fork_counter();
+            } else {
+                // In any case, deleting any cached block that calls itself the next block is
+                // the right thing to do - if it really was the next block, we would not be
+                // executing this branch.
+                if let Some(highest_block_number) = self.db.get_highest_canonical_block_number()? {
+                    // TR-32: forks are not canonical until the winner is at least two blocks higher
+                    // than the loser. So any chain shorter than highest_block_number - 2 can be disposed
+                    // of.
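+                    // (Worked example: with highest_block_number = 10, this deletes cached
+                    // blocks numbered <= max(10, 3) - 3 = 7; the max() guard only exists to
+                    // avoid underflow while the canonical chain is fewer than 3 blocks long.)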
+                    self.buffered
+                        .delete_blocks_up_to(std::cmp::max(highest_block_number, 3) - 3);
+                    trace!(
+                        "block_store::obtain_child_block_candidates : deleted cached blocks up to and including {0}",
+                        highest_block_number
+                    );
+                }
             }
+
             Ok(revised)
         } else {
             Ok(with_parent_hashes)
@@ -1229,7 +1456,9 @@ impl BlockStore {
             }
             gap_start = gap_end + 1;
         }
-        // There's never a gap at the end, because we don't know at which view we stopped.
+
+        // There are various other cases, but find_known_view_gaps() will deal with them.
+
         Ok(())
     }
 
@@ -1285,4 +1514,8 @@ impl BlockStore {
             )))
         }
     }
+
+    pub fn illustrate(&self, from_db: &Vec<Hash>) -> Result<String> {
+        self.buffered.illustrate(from_db)
+    }
 }
diff --git a/zilliqa/src/cfg.rs b/zilliqa/src/cfg.rs
index a2fbd7845..d4ace0a9e 100644
--- a/zilliqa/src/cfg.rs
+++ b/zilliqa/src/cfg.rs
@@ -9,6 +9,7 @@ use crate::{
     crypto::{Hash, NodePublicKey},
     transaction::EvmGas,
 };
+use std::net::Ipv4Addr;
 
 // Note that z2 constructs instances of this to save as a configuration so it must be both
 // serializable and deserializable.
@@ -34,6 +35,11 @@ pub struct Config {
     /// The base address of the OTLP collector. If not set, metrics will not be exported.
     #[serde(default)]
     pub otlp_collector_endpoint: Option<String>,
+    /// The subnet on which "real" addresses are found. Set to 0 to ignore.
+    /// We do this because in z2, at least, and probably elsewhere, we need to bypass filtering of private
+    /// address ranges so that we can get any addresses at all for local services.
+    /// (addr, mask)
+    pub listening_subnet: Option<(Ipv4Addr, Ipv4Addr)>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -139,7 +145,7 @@ pub fn block_request_limit_default() -> usize {
 }
 
 pub fn max_blocks_in_flight_default() -> u64 {
-    1000
+    20
 }
 
 pub fn block_request_batch_size_default() -> u64 {
diff --git a/zilliqa/src/consensus.rs b/zilliqa/src/consensus.rs
index d6581eebf..4a20c79c2 100644
--- a/zilliqa/src/consensus.rs
+++ b/zilliqa/src/consensus.rs
@@ -3,6 +3,7 @@ use std::{
     collections::{BTreeMap, HashMap},
     error::Error,
     fmt::Display,
+    ops::Range,
     sync::Arc,
     time::Duration,
 };
@@ -761,7 +762,10 @@ impl Consensus {
             if view != proposal_view + 1 {
                 view = proposal_view + 1;
                 self.set_view(view)?;
-                debug!("*** setting view to proposal view... view is now {}", view);
+                debug!(
+                    "*** setting view to proposal view... view is now {} ({:0x})",
+                    view, view
+                );
             }
 
             if let Some(buffered_votes) = self.buffered_votes.remove(&block.hash()) {
@@ -3217,6 +3221,52 @@ impl Consensus {
         self.block_store.report_outgoing_message_failure(failure)
     }
 
+    pub fn dump_graphs(&mut self, file_name: &str, min_view: u64, max_view: u64) -> Result<()> {
+        let hashes = if max_view == 0 {
+            if min_view == 0 {
+                // the last 256 hashes.
+                self.db.get_highest_block_hashes(256)?
+            } else {
+                // from min_view to latest.
+                let latest = self.db.get_highest_block_view()?;
+                self.db.get_hashes_for_views(Range {
+                    start: min_view,
+                    end: latest + 1,
+                })?
+            }
+        } else {
+            // An actual range..
+            self.db.get_hashes_for_views(Range {
+                start: min_view,
+                end: max_view + 1,
+            })?
+        };
+        let cache_contents = if max_view == 0 {
+            self.block_store.illustrate(&hashes)?
+        } else {
+            "".to_string()
+        };
+        // Now illustrate the hashes in recent.
+        let mut recent_blocks = String::new();
+        for blk_hash in hashes {
+            // @todo could be (much) more efficient.
+            let blk = self
+                .get_block(&blk_hash)?
+                .ok_or(anyhow!("No block for hash {0}", blk_hash))?;
+            let parent_hash = blk.parent_hash();
+            recent_blocks.push_str(&format!(
+                "h{0}[label=\"Bv#{1}/b#{2} {0}\"]\n",
+                blk.hash(),
+                blk.view(),
+                blk.number()
+            ));
+            recent_blocks.push_str(&format!("h{0} -> h{1}\n", parent_hash, blk.hash()));
+        }
+        let to_write = format!("digraph {{\n{0}\n{1}\n}}", cache_contents, recent_blocks);
+        std::fs::write(file_name, to_write.as_bytes())?;
+        Ok(())
+    }
+
     pub fn tick(&mut self) -> Result<()> {
         trace!("consensus::tick()");
         trace!("request_missing_blocks from timer");
diff --git a/zilliqa/src/db.rs b/zilliqa/src/db.rs
index 4265a5b1f..87969385f 100644
--- a/zilliqa/src/db.rs
+++ b/zilliqa/src/db.rs
@@ -553,14 +553,33 @@ impl Db {
             .optional()?)
     }
 
+    pub fn get_highest_block_view(&self) -> Result<u64> {
+        Ok(self.db.lock().unwrap().query_row_and_then(
+            "select view from blocks order by height desc limit 1",
+            (),
+            |row| row.get(0),
+        )?)
+    }
+
+    pub fn get_hashes_for_views(&self, views: Range<u64>) -> Result<Vec<Hash>> {
+        Ok(self
+            .db
+            .lock()
+            .unwrap()
+            .prepare_cached("select block_hash from blocks where view >= ?1 and view < ?2")?
+            .query_map([views.start, views.end], |row| row.get(0))?
+            .collect::<Result<Vec<_>, _>>()?)
+    }
+
     pub fn get_highest_block_hashes(&self, how_many: usize) -> Result<Vec<Hash>> {
+        // Deliberately don't test for is_canonical - we want to include forks.
         Ok(self
             .db
             .lock()
-            .unwrap()
-            .prepare_cached(
-                "select block_hash from blocks where is_canonical = true order by height desc limit ?1")?
-            .query_map([how_many], |row| row.get(0))?.collect::<Result<Vec<_>, _>>()?)
+            .unwrap()
+            .prepare_cached("select block_hash from blocks order by height desc limit ?1")?
+            .query_map([how_many], |row| row.get(0))?
+            .collect::<Result<Vec<_>, _>>()?)
     }
 
     pub fn set_high_qc_with_db_tx(
diff --git a/zilliqa/src/director.rs b/zilliqa/src/director.rs
new file mode 100644
index 000000000..15e06be45
--- /dev/null
+++ b/zilliqa/src/director.rs
@@ -0,0 +1,33 @@
+// Director: directs the behaviour of other parts of the system.
+
+use anyhow::Result;
+use libp2p::PeerId;
+use std::collections::HashSet;
+use tracing::*;
+
+#[derive(Debug)]
+pub struct Director {
+    // Only talk to these nodes.
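+    // `None` means no filtering at all - every peer is allowed; an empty set
+    // would silence every peer.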
+    whitelist: Option<HashSet<PeerId>>,
+}
+
+impl Director {
+    pub fn new() -> Result<Self> {
+        Ok(Self { whitelist: None })
+    }
+
+    pub fn whitelist(&mut self, whitelist: Option<Vec<PeerId>>) -> Result<()> {
+        trace!("director: Whitelist set to {whitelist:?}");
+        self.whitelist = whitelist.map(|x| x.iter().cloned().collect::<HashSet<_>>());
+        Ok(())
+    }
+
+    pub fn is_allowed(&self, id: &str, from: &PeerId) -> Result<bool> {
+        let result = match &self.whitelist {
+            None => true,
+            Some(peers) => peers.contains(from),
+        };
+        trace!("director: message {id} from {from:?} is_allowed {result}");
+        Ok(result)
+    }
+}
diff --git a/zilliqa/src/lib.rs b/zilliqa/src/lib.rs
index 9762bc844..e386f5b25 100644
--- a/zilliqa/src/lib.rs
+++ b/zilliqa/src/lib.rs
@@ -7,6 +7,7 @@ pub mod constants;
 pub mod contracts;
 pub mod crypto;
 pub mod db;
+pub mod director;
 mod error;
 pub mod exec;
 mod health;
diff --git a/zilliqa/src/node.rs b/zilliqa/src/node.rs
index 72cf830e0..1a98a526a 100644
--- a/zilliqa/src/node.rs
+++ b/zilliqa/src/node.rs
@@ -34,6 +34,7 @@ use crate::{
     consensus::Consensus,
     crypto::{Hash, SecretKey},
     db::Db,
+    director::Director,
     exec::{PendingState, TransactionApplyResult},
     inspector::{self, ScillaInspector},
     message::{
@@ -157,6 +158,7 @@ pub struct Node {
     pub consensus: Consensus,
     peer_num: Arc<AtomicUsize>,
     pub chain_id: ChainId,
+    pub director: Director,
 }
 
 #[derive(Debug, Copy, Clone)]
@@ -206,6 +208,7 @@ impl Node {
             chain_id: ChainId::new(config.eth_chain_id),
             consensus: Consensus::new(secret_key, config, message_sender, reset_timeout, db)?,
             peer_num,
+            director: Director::new()?,
         };
         Ok(node)
     }
@@ -236,6 +239,10 @@ impl Node {
         response_channel: ResponseChannel<ExternalMessage>,
     ) -> Result<()> {
         debug!(%from, to = %self.peer_id, %id, %message, "handling request");
+        if !self.director.is_allowed(id, &from)? {
+            debug!("Ignoring request");
+            return Ok(());
+        }
         match message {
             ExternalMessage::Vote(m) => {
                 if let Some((block, transactions)) = self.consensus.vote(*m)? {
@@ -339,12 +346,23 @@ impl Node {
         failure: OutgoingMessageFailure,
     ) -> Result<()> {
         debug!(from = %self.peer_id, %to, ?failure, "handling message failure");
+        if !self
+            .director
+            .is_allowed(&format!("{:?}", failure.request_id), &failure.peer)?
+        {
+            trace!("Ignoring message failure");
+            return Ok(());
+        }
         self.consensus.report_outgoing_message_failure(failure)?;
         Ok(())
     }
 
     pub fn handle_response(&mut self, from: PeerId, message: ExternalMessage) -> Result<()> {
         debug!(%from, to = %self.peer_id, %message, "handling response");
+        if !self.director.is_allowed("Unknown", &from)? {
+            trace!("Ignoring response");
+            return Ok(());
+        }
         match message {
             ExternalMessage::BlockResponse(m) => self.handle_block_response(from, m)?,
             ExternalMessage::Acknowledgement => {}
@@ -929,6 +947,7 @@ impl Node {
             "block_store::handle_block_response - received blocks response of length {}",
             response.proposals.len()
         );
+
         self.consensus
             .receive_block_availability(from, &response.availability)?;
diff --git a/zq2-richard.yaml b/zq2-richard.yaml
index 14f1f8877..7492f376c 100644
--- a/zq2-richard.yaml
+++ b/zq2-richard.yaml
@@ -8,6 +8,6 @@ roles:
 - apps
 - checkpoint
 versions:
-  zq2: 6e284b5f
+  zq2: 6baa134d
   otterscan: develop
   spout: main

From 8ac089e3dca930db2718a535eb85fbbd12834ef2 Mon Sep 17 00:00:00 2001
From: Richard Watts
Date: Thu, 14 Nov 2024 16:50:39 +0000
Subject: [PATCH 2/4] (fix) Validation fixes.
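
Largely lint-driven cleanups (import grouping, slice parameters instead of
&Vec, idiomatic std calls) of the kind rustfmt/clippy suggest, plus two test
timeout bumps. For example, the manual radix parse in z2/src/testing.rs

    let start_ms = u64::from_str_radix(times[0], 10)?;

becomes

    let start_ms = times[0].parse::<u64>()?;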
---
 z2/src/deployer.rs            |  6 +++---
 z2/src/plumbing.rs            |  6 +++---
 z2/src/setup.rs               | 17 +++++++++--------
 z2/src/testing.rs             | 36 +++++++++++++++++-------------------
 z2/src/zq2.rs                 |  3 ++-
 zilliqa/src/block_store.rs    | 25 +++++++++----------------
 zilliqa/src/cfg.rs            |  3 +--
 zilliqa/src/director.rs       |  3 ++-
 zilliqa/tests/it/consensus.rs |  2 +-
 zilliqa/tests/it/eth.rs       |  2 +-
 10 files changed, 50 insertions(+), 53 deletions(-)

diff --git a/z2/src/deployer.rs b/z2/src/deployer.rs
index f52b2193a..ac95cc85b 100644
--- a/z2/src/deployer.rs
+++ b/z2/src/deployer.rs
@@ -8,11 +8,11 @@ use tokio::{fs, sync::Semaphore, task};
 
 use crate::{
     address::EthereumAddress,
-    chain::Chain,
     chain::{
         config::NetworkConfig,
         instance::ChainInstance,
         node::{self, ChainNode, NodeRole},
+        Chain,
     },
     secret::Secret,
     validators,
@@ -63,7 +63,7 @@ pub async fn install_or_upgrade(
     node_selection: bool,
     max_parallel: usize,
     persistence_url: Option<String>,
-    machines: &Vec<String>,
+    machines: &[String],
 ) -> Result<()> {
     let config = NetworkConfig::from_file(config_file).await?;
     let mut chain = ChainInstance::new(config).await?;
@@ -75,7 +75,7 @@ pub async fn install_or_upgrade(
         .collect::<Vec<_>>();
 
     let selected_machines = if !machines.is_empty() {
-        machines.clone()
+        machines.to_owned()
     } else if !node_selection {
         node_names
     } else {
diff --git a/z2/src/plumbing.rs b/z2/src/plumbing.rs
index 04f06d23e..3c816c1af 100644
--- a/z2/src/plumbing.rs
+++ b/z2/src/plumbing.rs
@@ -176,7 +176,7 @@ pub async fn run_deployer_install(
     node_selection: bool,
     max_parallel: Option<usize>,
     persistence_url: Option<String>,
-    machines: &Vec<String>,
+    machines: &[String],
 ) -> Result<()> {
     println!("🦆 Installing {config_file} .. ");
     deployer::install_or_upgrade(
@@ -195,7 +195,7 @@ pub async fn run_deployer_upgrade(
     config_file: &str,
     node_selection: bool,
     max_parallel: Option<usize>,
-    machines: &Vec<String>,
+    machines: &[String],
 ) -> Result<()> {
     println!("🦆 Upgrading {config_file} .. ");
"); deployer::install_or_upgrade( @@ -484,7 +484,7 @@ pub async fn test( base_dir: &str, log_spec: &str, watch: bool, - rest: &Vec, + rest: &[String], ) -> Result<()> { let mut setup_obj = setup::Setup::load(config_dir, log_spec, base_dir, watch).await?; if rest.is_empty() { diff --git a/z2/src/setup.rs b/z2/src/setup.rs index 4f9d48d86..e9683b8fe 100644 --- a/z2/src/setup.rs +++ b/z2/src/setup.rs @@ -1,4 +1,10 @@ -use crate::address::EthereumAddress; +use core::net::Ipv4Addr; +use std::{ + collections::HashMap, + path::{Path, PathBuf}, + str::FromStr, +}; + use alloy::{ primitives::{address, Address}, signers::local::LocalSigner, @@ -8,11 +14,6 @@ use k256::ecdsa::SigningKey; use libp2p::{Multiaddr, PeerId}; use serde::{Deserialize, Serialize}; use serde_yaml; -use std::{ - collections::HashMap, - path::{Path, PathBuf}, - str::FromStr, -}; use tera::Tera; use tokio::fs; use zilliqa::{ @@ -36,13 +37,13 @@ use zilliqa::{ }; use crate::{ + address::EthereumAddress, chain, collector::{self, Collector}, components::{Component, Requirements}, node_spec::Composition, scilla, utils, validators, }; -use core::net::Ipv4Addr; const GENESIS_DEPOSIT: u128 = 10000000000000000000000000; const DATADIR_PREFIX: &str = "z2_node_"; @@ -277,7 +278,7 @@ impl Setup { } pub fn get_p2p_port(&self, index: u16) -> u16 { - return self.config.base_port + 301 + index + self.config.base_port; + self.config.base_port + 301 + index + self.config.base_port } pub fn get_external_addr(&self, index: u16) -> Result { diff --git a/z2/src/testing.rs b/z2/src/testing.rs index 130e5e594..9495a769f 100644 --- a/z2/src/testing.rs +++ b/z2/src/testing.rs @@ -1,17 +1,25 @@ // Code to stress-test z2 networks. #![allow(unused_imports)] -use crate::{node_spec, setup::Setup}; +use std::{ + cmp::{Ordering, PartialOrd}, + collections::{BinaryHeap, HashSet}, +}; + use anyhow::{anyhow, Error, Result}; -use jsonrpsee::core::{client::ClientT, params::ArrayParams}; -use jsonrpsee::http_client::HttpClientBuilder; -use jsonrpsee::rpc_params; -use std::cmp::{Ordering, PartialOrd}; -use std::collections::{BinaryHeap, HashSet}; -use tokio::process::Command; -use tokio::time::{self, Duration, Instant}; +use jsonrpsee::{ + core::{client::ClientT, params::ArrayParams}, + http_client::HttpClientBuilder, + rpc_params, +}; +use tokio::{ + process::Command, + time::{self, Duration, Instant}, +}; use tower_http::trace::TraceLayer; +use crate::{node_spec, setup::Setup}; + // This is inherently reversed, since BinaryHeap is a max-heap // Not very artistic, but it'll do . @@ -43,13 +51,7 @@ struct HeapEntry { impl Ord for HeapEntry { fn cmp(&self, other: &Self) -> Ordering { // Backwards because BinaryHeap is a max-heap. 
-        if self.when_ms > other.when_ms {
-            Ordering::Less
-        } else if self.when_ms < other.when_ms {
-            Ordering::Greater
-        } else {
-            Ordering::Equal
-        }
+        other.when_ms.cmp(&self.when_ms)
     }
 }
 
@@ -103,8 +105,8 @@ impl Partition {
             if times.len() != 2 {
                 return Err(anyhow!("Arg '{arg}' - there must be two times, separated by a '/' after the ':' - found {0}", times.len()));
             }
-            let start_ms = u64::from_str_radix(times[0], 10)?;
-            let end_ms = u64::from_str_radix(times[1], 10)?;
+            let start_ms = times[0].parse::<u64>()?;
+            let end_ms = times[1].parse::<u64>()?;
             entries.push(PartitionEntry {
                 nodes_to_talk_to,
                 nodes_to_tell,
diff --git a/z2/src/zq2.rs b/z2/src/zq2.rs
index f2607a964..ce2028619 100644
--- a/z2/src/zq2.rs
+++ b/z2/src/zq2.rs
@@ -1,6 +1,7 @@
+use std::env;
+
 use anyhow::Result;
 use futures::future::JoinAll;
-use std::env;
 use tokio::{process::Command, sync::mpsc, task::JoinHandle};
 
 use crate::{
diff --git a/zilliqa/src/block_store.rs b/zilliqa/src/block_store.rs
index 31143a69b..e8f16eb3c 100644
--- a/zilliqa/src/block_store.rs
+++ b/zilliqa/src/block_store.rs
@@ -107,7 +107,7 @@ impl BlockCache {
     }
 
     // Dump out a graphviz dotfile fragment containing the cache with links.
-    pub fn illustrate(&self, from_db: &Vec<Hash>) -> Result<String> {
+    pub fn illustrate(&self, from_db: &[Hash]) -> Result<String> {
         // Print out the structure of the cache (fairly slowly!).
         let mut by_hash: HashMap<Hash, HashSet<u128>> = HashMap::new();
         let mut result = String::new();
@@ -123,10 +123,7 @@ impl BlockCache {
 
         for (key, entry) in self.cache.iter().chain(self.head_cache.iter()) {
             let hash = entry.proposal.hash();
-            by_hash
-                .entry(hash)
-                .or_insert_with(|| HashSet::new())
-                .insert(*key);
+            by_hash.entry(hash).or_default().insert(*key);
         }
 
         for (key, entry) in self.cache.iter().chain(self.head_cache.iter()) {
@@ -154,7 +151,7 @@ impl BlockCache {
             }
         }
 
-        result.push_str("\n");
+        result.push('\n');
 
         Ok(result)
     }
@@ -188,7 +185,7 @@
         Ok(self
             .by_hash
             .get(hash)
-            .map(|entry_set: &HashSet<u128>| {
+            .and_then(|entry_set: &HashSet<u128>| {
                 entry_set
                     .iter()
                     .take(1)
@@ -199,8 +196,7 @@
                     .map(|entry| entry.proposal.clone())
                 })
                 .last()
-            })
-            .flatten())
+            }))
     }
 
     /// Find any view gaps known to exist in the cache and add them to the known view gap
@@ -276,7 +272,7 @@ impl BlockCache {
             .head_cache
             .range::<u128, _>((Bound::Included(key), Bound::Unbounded));
         if let Some((_, v)) = head_iter.next() {
-            return Some((v.proposal.view(), v.proposal.number()));
+            Some((v.proposal.view(), v.proposal.number()))
         } else {
             None
         }
@@ -494,11 +490,8 @@
                     .get_mut(&entry.parent_hash)
                     .map(|x| x.remove(&key))
                 });
-            by_parent_hash
-                .entry(*parent_hash)
-                .or_insert(HashSet::new())
-                .insert(key);
-            by_hash.entry(hash).or_insert(HashSet::new()).insert(key);
+            by_parent_hash.entry(*parent_hash).or_default().insert(key);
+            by_hash.entry(hash).or_default().insert(key);
         }
 
         if proposal.header.view <= highest_confirmed_view {
@@ -1515,7 +1508,7 @@
         }
     }
 
-    pub fn illustrate(&self, from_db: &Vec<Hash>) -> Result<String> {
+    pub fn illustrate(&self, from_db: &[Hash]) -> Result<String> {
         self.buffered.illustrate(from_db)
     }
 }
diff --git a/zilliqa/src/cfg.rs b/zilliqa/src/cfg.rs
index d4ace0a9e..c0546c331 100644
--- a/zilliqa/src/cfg.rs
+++ b/zilliqa/src/cfg.rs
@@ -1,4 +1,4 @@
-use std::{ops::Deref, str::FromStr, time::Duration};
+use std::{net::Ipv4Addr, ops::Deref, str::FromStr, time::Duration};
 
 use alloy::primitives::Address;
 use libp2p::{Multiaddr, PeerId};
@@ -9,7 +9,6 @@ use crate::{
     crypto::{Hash, NodePublicKey},
     transaction::EvmGas,
 };
-use std::net::Ipv4Addr;
 
 //
Note that z2 constructs instances of this to save as a configuration so it must be both // serializable and deserializable. diff --git a/zilliqa/src/director.rs b/zilliqa/src/director.rs index 15e06be45..c324990cb 100644 --- a/zilliqa/src/director.rs +++ b/zilliqa/src/director.rs @@ -1,8 +1,9 @@ // Director: directs the behaviour of other parts of the system. +use std::collections::HashSet; + use anyhow::Result; use libp2p::PeerId; -use std::collections::HashSet; use tracing::*; #[derive(Debug)] diff --git a/zilliqa/tests/it/consensus.rs b/zilliqa/tests/it/consensus.rs index 9e2c6099a..9764f665b 100644 --- a/zilliqa/tests/it/consensus.rs +++ b/zilliqa/tests/it/consensus.rs @@ -99,7 +99,7 @@ async fn block_production(mut network: Network) { .map_or(0, |b| b.number()) >= 5 }, - 50, + 100, ) .await .unwrap(); diff --git a/zilliqa/tests/it/eth.rs b/zilliqa/tests/it/eth.rs index 54d55ed00..73750d05b 100644 --- a/zilliqa/tests/it/eth.rs +++ b/zilliqa/tests/it/eth.rs @@ -1427,7 +1427,7 @@ async fn test_eth_syncing(mut network: Network) { network .run_until_async( || async { wallet.get_block_number().await.unwrap().as_u64() > 4 }, - 50, + 100, ) .await .unwrap(); From 05f0d05ae6cd832f3affa9995234f5f17116e4fb Mon Sep 17 00:00:00 2001 From: Richard Watts Date: Thu, 14 Nov 2024 20:53:25 +0000 Subject: [PATCH 3/4] (fix) Attempt to fix Cargo.lock --- Cargo.lock | 289 ++++++++++++++--------------------------------------- 1 file changed, 77 insertions(+), 212 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 796c95933..570df8119 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4044,18 +4044,16 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.15.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" dependencies = [ "async-trait", "attohttpc", "bytes", "futures", - "http 1.1.0", - "http-body-util", - "hyper 1.5.0", - "hyper-util", + "http 0.2.12", + "hyper 0.14.30", "log", "rand", "tokio", @@ -4703,55 +4701,33 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libp2p" version = "0.54.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" dependencies = [ "bytes", "either", "futures", "futures-timer", "getrandom", - "libp2p-allow-block-list 0.4.1", + "libp2p-allow-block-list", "libp2p-autonat", - "libp2p-connection-limits 0.4.0", - "libp2p-core 0.42.0", + "libp2p-connection-limits", + "libp2p-core", "libp2p-dns", "libp2p-gossipsub", - "libp2p-identify 0.45.1", + "libp2p-identify", "libp2p-identity", "libp2p-kad", "libp2p-mdns", - "libp2p-metrics 0.15.0", + "libp2p-metrics", "libp2p-quic", "libp2p-request-response", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", "multiaddr", "pin-project", - "rw-stream-sink 0.4.0", - "thiserror 1.0.69", -] - -[[package]] -name = "libp2p" -version = "0.54.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" -dependencies = [ - "bytes", - "either", - "futures", - "futures-timer", - "getrandom", - "libp2p-allow-block-list 0.4.0", - "libp2p-connection-limits 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - 
"libp2p-identify 0.45.0", - "libp2p-identity", - "libp2p-metrics 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-swarm 0.45.1", - "multiaddr", - "pin-project", - "rw-stream-sink 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rw-stream-sink", "thiserror 1.0.69", ] @@ -4761,25 +4737,17 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", - "void", -] - -[[package]] -name = "libp2p-allow-block-list" -version = "0.4.1" -dependencies = [ - "libp2p-core 0.42.0", - "libp2p-identity", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "void", ] [[package]] name = "libp2p-autonat" -version = "0.13.1" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a083675f189803d0682a2726131628e808144911dad076858bfbe30b13065499" dependencies = [ "async-trait", "asynchronous-codec", @@ -4788,12 +4756,12 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-request-response", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand", "rand_core", "thiserror 1.0.69", @@ -4802,53 +4770,16 @@ dependencies = [ "web-time", ] -[[package]] -name = "libp2p-connection-limits" -version = "0.4.0" -dependencies = [ - "libp2p-core 0.42.0", - "libp2p-identity", - "libp2p-swarm 0.45.2", - "void", -] - [[package]] name = "libp2p-connection-limits" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ - "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.1", - "void", -] - -[[package]] -name = "libp2p-core" -version = "0.42.0" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "libp2p-identity", - "multiaddr", - "multihash", - "multistream-select 0.13.0", - "once_cell", - "parking_lot 0.12.3", - "pin-project", - "quick-protobuf", - "rand", - "rw-stream-sink 0.4.0", - "serde", - "smallvec", - "thiserror 1.0.69", - "tracing", - "unsigned-varint 0.8.0", + "libp2p-swarm", "void", - "web-time", ] [[package]] @@ -4864,13 +4795,14 @@ dependencies = [ "libp2p-identity", "multiaddr", "multihash", - "multistream-select 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "multistream-select", "once_cell", "parking_lot 0.12.3", "pin-project", "quick-protobuf", "rand", - "rw-stream-sink 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rw-stream-sink", + "serde", "smallvec", "thiserror 1.0.69", "tracing", @@ -4882,11 +4814,13 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "parking_lot 0.12.3", "smallvec", @@ -4895,7 +4829,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.47.1" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" dependencies = [ "asynchronous-codec", "base64 0.22.1", @@ -4907,12 +4843,12 @@ dependencies = [ "futures-ticker", "getrandom", "hex_fmt", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "prometheus-client", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand", "regex", "serde", @@ -4934,33 +4870,12 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-identity", - "libp2p-swarm 0.45.1", - "lru", - "quick-protobuf", - "quick-protobuf-codec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec", - "thiserror 1.0.69", - "tracing", - "void", -] - -[[package]] -name = "libp2p-identify" -version = "0.45.1" -dependencies = [ - "asynchronous-codec", - "either", - "futures", - "futures-bounded", - "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "lru", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "smallvec", "thiserror 1.0.69", "tracing", @@ -4988,7 +4903,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.47.0" +version = "0.46.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" dependencies = [ "arrayvec", "asynchronous-codec", @@ -4998,11 +4915,11 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand", "serde", "sha2", @@ -5017,14 +4934,16 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" dependencies = [ "data-encoding", "futures", "hickory-proto", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "rand", "smallvec", "socket2", @@ -5033,22 +4952,6 @@ dependencies = [ "void", ] -[[package]] -name = "libp2p-metrics" -version = "0.15.0" -dependencies = [ - "futures", - "libp2p-core 0.42.0", - "libp2p-gossipsub", - "libp2p-identify 0.45.1", - "libp2p-identity", - "libp2p-kad", - "libp2p-swarm 0.45.2", - "pin-project", - "prometheus-client", - "web-time", -] - [[package]] name = "libp2p-metrics" version = "0.15.0" @@ -5056,10 +4959,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-identify 0.45.0", + "libp2p-core", + "libp2p-gossipsub", + "libp2p-identify", "libp2p-identity", - "libp2p-swarm 0.45.1", + "libp2p-kad", + "libp2p-swarm", "pin-project", "prometheus-client", "web-time", @@ -5068,12 +4973,14 @@ dependencies = [ [[package]] name = "libp2p-quic" version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" dependencies = [ "bytes", "futures", "futures-timer", "if-watch", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-tls", "parking_lot 0.12.3", @@ 
-5090,15 +4997,17 @@ dependencies = [ [[package]] name = "libp2p-request-response" version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" dependencies = [ "async-trait", "cbor4ii", "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.45.2", + "libp2p-swarm", "rand", "serde", "smallvec", @@ -5117,31 +5026,11 @@ dependencies = [ "fnv", "futures", "futures-timer", - "libp2p-core 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-identity", - "lru", - "multistream-select 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "once_cell", - "rand", - "smallvec", - "tracing", - "void", - "web-time", -] - -[[package]] -name = "libp2p-swarm" -version = "0.45.2" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", "lru", - "multistream-select 0.13.0", + "multistream-select", "once_cell", "rand", "smallvec", @@ -5154,6 +5043,8 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -5164,12 +5055,14 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" dependencies = [ "futures", "futures-timer", "if-watch", "libc", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "socket2", "tokio", @@ -5179,10 +5072,12 @@ dependencies = [ [[package]] name = "libp2p-tls" version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.42.0", + "libp2p-core", "libp2p-identity", "rcgen", "ring 0.17.8", @@ -5195,13 +5090,15 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.3.1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.42.0", - "libp2p-swarm 0.45.2", + "libp2p-core", + "libp2p-swarm", "tokio", "tracing", "void", @@ -5484,18 +5381,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" -[[package]] -name = "multistream-select" -version = "0.13.0" -dependencies = [ - "bytes", - "futures", - "pin-project", - "smallvec", - "tracing", - "unsigned-varint 0.8.0", -] - [[package]] name = "multistream-select" version = "0.13.0" @@ -6619,17 +6504,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "quick-protobuf-codec" -version = "0.3.1" -dependencies = [ - "asynchronous-codec", - "bytes", - "quick-protobuf", - "thiserror 1.0.69", - "unsigned-varint 0.8.0", -] - [[package]] name = "quick-protobuf-codec" version = "0.3.1" @@ -7441,15 +7315,6 @@ dependencies = [ "wait-timeout", ] -[[package]] -name = "rw-stream-sink" -version = "0.4.0" -dependencies = [ - "futures", - "pin-project", - "static_assertions", -] - [[package]] name = "rw-stream-sink" 
version = "0.4.0" @@ -9807,7 +9672,7 @@ dependencies = [ "jsonrpsee 0.22.5", "k256", "lazy_static", - "libp2p 0.54.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p", "log", "octocrab", "primitive-types", @@ -9958,7 +9823,7 @@ dependencies = [ "itertools 0.13.0", "jsonrpsee 0.24.4", "k256", - "libp2p 0.54.1", + "libp2p", "lru", "lru-mem", "lz4", From eae3861e486fd0e7d92701aeef6637c9fbf44349 Mon Sep 17 00:00:00 2001 From: Richard Watts Date: Tue, 19 Nov 2024 16:40:38 +0000 Subject: [PATCH 4/4] (feat) Allow whitelisted local addresses, so that local z2 networks can communicate --- zilliqa/src/p2p_node.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/zilliqa/src/p2p_node.rs b/zilliqa/src/p2p_node.rs index 1c1cedbcf..0893c9f62 100644 --- a/zilliqa/src/p2p_node.rs +++ b/zilliqa/src/p2p_node.rs @@ -3,6 +3,7 @@ use std::{ collections::HashMap, iter, + ops::BitAnd, sync::{atomic::AtomicUsize, Arc}, time::Duration, }; @@ -252,7 +253,17 @@ impl P2pNode { for addr in listen_addrs { // If the node is advertising a non-global address, ignore it. let is_non_global = addr.iter().any(|p| match p { - Protocol::Ip4(addr) => addr.is_loopback() || addr.is_private(), + Protocol::Ip4(addr) => { + if let Some((net, mask)) = self.config.listening_subnet { + let left = net.bitand(mask); + let right = addr.bitand(mask); + if left == right { + info!("p2p_identify: {addr:?} is in the listening subnet - passing"); + return false; + } + } + addr.is_loopback() || addr.is_private() + }, Protocol::Ip6(addr) => addr.is_loopback(), _ => false, });