From b14537fac0c69505e64743dba8270c06fa1f11f5 Mon Sep 17 00:00:00 2001 From: D-Stacks <78099568+D-Stacks@users.noreply.github.com> Date: Thu, 19 Sep 2024 22:16:54 +0200 Subject: [PATCH 01/31] `virtual chain from block` batching. (#454) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * expose vspc_from_block batching possibilities to rpc. * fmt * limit by merged blocks, set source as def. start. * small clean-up * fmt * actually bound by num of merged blocks, in include transactions case. * fmt * update. * update_2 * new_high = high * remove high hash in consensus api.. as it is not required. * fmt * make proto comment more accurate. * fix tests, and lints, add to ser / der correctly. * change two freq warns to debug * remove option, default to pp. not source. * fix integration test, some Option left. * bump version: ´0.15.1 => 0.15.2` * remove "optional" startHash * add to cli rpc.rs * remove comment. * edit comment in .proto referencing def. startHash behavior. * only batch added chain blocks, not removed, add check if source is a chain ancestor of high. * remove dangling code in comment * remove error from some prev. commit. * Optionalize limts. --------- Co-authored-by: Michael Sutton --- Cargo.lock | 116 +++++++++--------- Cargo.toml | 112 ++++++++--------- cli/src/modules/rpc.rs | 18 ++- components/consensusmanager/src/session.rs | 16 ++- consensus/core/src/api/mod.rs | 13 +- consensus/src/consensus/mod.rs | 50 ++++++-- .../pipeline/virtual_processor/processor.rs | 2 +- consensus/src/processes/sync/mod.rs | 2 +- consensus/src/processes/traversal_manager.rs | 19 ++- mining/src/mempool/remove_transaction.rs | 6 +- rpc/grpc/core/proto/rpc.proto | 9 +- rpc/service/src/converter/consensus.rs | 3 +- rpc/service/src/service.rs | 21 +++- .../src/daemon_integration_tests.rs | 8 +- 14 files changed, 247 insertions(+), 148 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f23aca0f3..1296a8922d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2363,7 +2363,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.15.1" +version = "0.15.2" dependencies = [ "borsh", "criterion", @@ -2380,7 +2380,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.15.1" +version = "0.15.2" dependencies = [ "borsh", "igd-next", @@ -2403,14 +2403,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.15.1" +version = "0.15.2" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.15.1" +version = "0.15.2" dependencies = [ "borsh", "bs58", @@ -2437,7 +2437,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -2484,7 +2484,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.15.1" +version = "0.15.2" dependencies = [ "duration-string", "futures-util", @@ -2501,7 +2501,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.15.1" +version = "0.15.2" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2544,7 +2544,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.15.1" +version = "0.15.2" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2572,7 +2572,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "bincode", @@ -2610,7 +2610,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.15.1" +version = "0.15.2" dependencies = [ 
"async-channel 2.3.1", "cfg-if 1.0.0", @@ -2629,7 +2629,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.15.1" +version = "0.15.2" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2653,7 +2653,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.15.1" +version = "0.15.2" dependencies = [ "duration-string", "futures", @@ -2671,7 +2671,7 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2689,7 +2689,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -2711,7 +2711,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.15.1" +version = "0.15.2" dependencies = [ "bincode", "enum-primitive-derive", @@ -2733,7 +2733,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2765,7 +2765,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2794,7 +2794,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2830,7 +2830,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.15.1" +version = "0.15.2" dependencies = [ "blake2b_simd", "borsh", @@ -2851,7 +2851,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2870,7 +2870,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2898,7 +2898,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.15.1" +version = "0.15.2" dependencies = [ "borsh", "criterion", @@ -2919,14 +2919,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.15.1" +version = "0.15.2" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -2942,7 +2942,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.15.1" +version = "0.15.2" dependencies = [ "criterion", "futures-util", @@ -2969,7 +2969,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.15.1" +version = "0.15.2" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -2977,7 +2977,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.15.1" +version = "0.15.2" dependencies = [ "criterion", "kaspa-hashes", @@ -2990,7 +2990,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3026,7 +3026,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "chrono", @@ -3057,7 +3057,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.15.1" +version = "0.15.2" dependencies = [ "borsh", "ctrlc", @@ -3088,7 +3088,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.15.1" +version = "0.15.2" dependencies = [ "kaspa-core", "log", @@ -3100,7 +3100,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.15.1" +version = "0.15.2" dependencies = [ 
"criterion", "js-sys", @@ -3116,7 +3116,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3158,7 +3158,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.15.1" +version = "0.15.2" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3170,7 +3170,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "kaspa-addresses", @@ -3199,7 +3199,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3259,7 +3259,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.15.1" +version = "0.15.2" dependencies = [ "blake2b_simd", "borsh", @@ -3291,7 +3291,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.15.1" +version = "0.15.2" dependencies = [ "secp256k1", "thiserror", @@ -3299,7 +3299,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.15.1" +version = "0.15.2" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3335,7 +3335,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.15.1" +version = "0.15.2" dependencies = [ "cfg-if 1.0.0", "futures", @@ -3349,7 +3349,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.15.1" +version = "0.15.2" dependencies = [ "futures", "kaspa-consensus", @@ -3370,7 +3370,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-std", "async-trait", @@ -3382,7 +3382,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "js-sys", @@ -3396,7 +3396,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "aes", "ahash", @@ -3477,7 +3477,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -3510,7 +3510,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.15.1" +version = "0.15.2" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3523,7 +3523,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.15.1" +version = "0.15.2" dependencies = [ "bincode", "derive_builder", @@ -3550,7 +3550,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.15.1" +version = "0.15.2" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3578,7 +3578,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.15.1" +version = "0.15.2" dependencies = [ "faster-hex", "hexplay", @@ -3589,7 +3589,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-std", "async-trait", @@ -3625,7 +3625,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.15.1" +version = "0.15.2" dependencies = [ "ctrlc", "futures", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "clap 4.5.16", @@ -3659,7 +3659,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = 
"kaspa-wrpc-simple-client-example" -version = "0.15.1" +version = "0.15.2" dependencies = [ "futures", "kaspa-rpc-core", @@ -3697,7 +3697,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.15.1" +version = "0.15.2" dependencies = [ "ahash", "async-std", @@ -3727,7 +3727,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -5151,7 +5151,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "clap 4.5.16", @@ -5603,7 +5603,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.15.1" +version = "0.15.2" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index eaf07936e6..37acfb1729 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ members = [ [workspace.package] rust-version = "1.81.0" -version = "0.15.1" +version = "0.15.2" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -80,61 +80,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.15.1", path = "testing/integration" } -kaspa-addresses = { version = "0.15.1", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.15.1", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.15.1", path = "wallet/bip32" } -kaspa-cli = { version = "0.15.1", path = "cli" } -kaspa-connectionmanager = { version = "0.15.1", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.15.1", path = "consensus" } -kaspa-consensus-core = { version = "0.15.1", path = "consensus/core" } -kaspa-consensus-client = { version = "0.15.1", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.15.1", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.15.1", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.15.1", path = "components/consensusmanager" } -kaspa-core = { version = "0.15.1", path = "core" } -kaspa-daemon = { version = "0.15.1", path = "daemon" } -kaspa-database = { version = "0.15.1", path = "database" } -kaspa-grpc-client = { version = "0.15.1", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.15.1", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.15.1", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.15.1", path = "crypto/hashes" } -kaspa-index-core = { version = "0.15.1", path = "indexes/core" } -kaspa-index-processor = { version = "0.15.1", path = "indexes/processor" } -kaspa-math = { version = "0.15.1", path = "math" } -kaspa-merkle = { version = "0.15.1", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.15.1", path = "metrics/core" } -kaspa-mining = { version = "0.15.1", path = "mining" } -kaspa-mining-errors = { version = "0.15.1", path = "mining/errors" } -kaspa-muhash = { version = "0.15.1", path = "crypto/muhash" } -kaspa-notify = { version = "0.15.1", path = "notify" } -kaspa-p2p-flows = { version = "0.15.1", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.15.1", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.15.1", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.15.1", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.15.1", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.15.1", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.15.1", path = "rpc/service" } -kaspa-txscript = { version = "0.15.1", path = 
"crypto/txscript" } -kaspa-txscript-errors = { version = "0.15.1", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.15.1", path = "utils" } -kaspa-utils-tower = { version = "0.15.1", path = "utils/tower" } -kaspa-utxoindex = { version = "0.15.1", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.15.1", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.15.1", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.15.1", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.15.1", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.15.1", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.15.1", path = "wallet/macros" } -kaspa-wasm = { version = "0.15.1", path = "wasm" } -kaspa-wasm-core = { version = "0.15.1", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.15.1", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.15.1", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.15.1", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.15.1", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.15.1", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.15.1", path = "kaspad" } -kaspa-alloc = { version = "0.15.1", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.15.2", path = "testing/integration" } +kaspa-addresses = { version = "0.15.2", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.15.2", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.15.2", path = "wallet/bip32" } +kaspa-cli = { version = "0.15.2", path = "cli" } +kaspa-connectionmanager = { version = "0.15.2", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.15.2", path = "consensus" } +kaspa-consensus-core = { version = "0.15.2", path = "consensus/core" } +kaspa-consensus-client = { version = "0.15.2", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.15.2", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.15.2", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.15.2", path = "components/consensusmanager" } +kaspa-core = { version = "0.15.2", path = "core" } +kaspa-daemon = { version = "0.15.2", path = "daemon" } +kaspa-database = { version = "0.15.2", path = "database" } +kaspa-grpc-client = { version = "0.15.2", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.15.2", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.15.2", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.15.2", path = "crypto/hashes" } +kaspa-index-core = { version = "0.15.2", path = "indexes/core" } +kaspa-index-processor = { version = "0.15.2", path = "indexes/processor" } +kaspa-math = { version = "0.15.2", path = "math" } +kaspa-merkle = { version = "0.15.2", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.15.2", path = "metrics/core" } +kaspa-mining = { version = "0.15.2", path = "mining" } +kaspa-mining-errors = { version = "0.15.2", path = "mining/errors" } +kaspa-muhash = { version = "0.15.2", path = "crypto/muhash" } +kaspa-notify = { version = "0.15.2", path = "notify" } +kaspa-p2p-flows = { version = "0.15.2", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.15.2", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.15.2", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.15.2", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.15.2", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.15.2", path = 
"rpc/macros" } +kaspa-rpc-service = { version = "0.15.2", path = "rpc/service" } +kaspa-txscript = { version = "0.15.2", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.15.2", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.15.2", path = "utils" } +kaspa-utils-tower = { version = "0.15.2", path = "utils/tower" } +kaspa-utxoindex = { version = "0.15.2", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.15.2", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.15.2", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.15.2", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.15.2", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.15.2", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.15.2", path = "wallet/macros" } +kaspa-wasm = { version = "0.15.2", path = "wasm" } +kaspa-wasm-core = { version = "0.15.2", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.15.2", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.15.2", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.15.2", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.15.2", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.15.2", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.15.2", path = "kaspad" } +kaspa-alloc = { version = "0.15.2", path = "utils/alloc" } # external aes = "0.8.3" diff --git a/cli/src/modules/rpc.rs b/cli/src/modules/rpc.rs index f32523c4a3..f96cb5b612 100644 --- a/cli/src/modules/rpc.rs +++ b/cli/src/modules/rpc.rs @@ -121,10 +121,20 @@ impl Rpc { // let result = rpc.get_subnetwork_call(GetSubnetworkRequest { }).await?; // self.println(&ctx, result); // } - // RpcApiOps::GetVirtualChainFromBlock => { - // let result = rpc.get_virtual_chain_from_block_call(GetVirtualChainFromBlockRequest { }).await?; - // self.println(&ctx, result); - // } + RpcApiOps::GetVirtualChainFromBlock => { + if argv.is_empty() { + return Err(Error::custom("Missing startHash argument")); + }; + let start_hash = RpcHash::from_hex(argv.remove(0).as_str())?; + let include_accepted_transaction_ids = argv.remove(0).parse::().unwrap_or_default(); + let result = rpc + .get_virtual_chain_from_block_call( + None, + GetVirtualChainFromBlockRequest { start_hash, include_accepted_transaction_ids }, + ) + .await?; + self.println(&ctx, result); + } // RpcApiOps::GetBlocks => { // let result = rpc.get_blocks_call(GetBlocksRequest { }).await?; // self.println(&ctx, result); diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs index 81d5891488..2643739ee3 100644 --- a/components/consensusmanager/src/session.rs +++ b/components/consensusmanager/src/session.rs @@ -267,8 +267,12 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(|c| c.is_nearly_synced()).await } - pub async fn async_get_virtual_chain_from_block(&self, hash: Hash) -> ConsensusResult { - self.clone().spawn_blocking(move |c| c.get_virtual_chain_from_block(hash)).await + pub async fn async_get_virtual_chain_from_block( + &self, + low: Hash, + chain_path_added_limit: Option, + ) -> ConsensusResult { + self.clone().spawn_blocking(move |c| c.get_virtual_chain_from_block(low, chain_path_added_limit)).await } pub async fn async_get_virtual_utxos( @@ -380,8 +384,12 @@ impl ConsensusSessionOwned { /// Returns acceptance data for a set of blocks belonging to the selected parent chain. 
     ///
     /// See `self::get_virtual_chain`
-    pub async fn async_get_blocks_acceptance_data(&self, hashes: Vec<Hash>) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
-        self.clone().spawn_blocking(move |c| c.get_blocks_acceptance_data(&hashes)).await
+    pub async fn async_get_blocks_acceptance_data(
+        &self,
+        hashes: Vec<Hash>,
+        merged_blocks_limit: Option<usize>,
+    ) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
+        self.clone().spawn_blocking(move |c| c.get_blocks_acceptance_data(&hashes, merged_blocks_limit)).await
     }

     pub async fn async_is_chain_block(&self, hash: Hash) -> ConsensusResult<bool> {
diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs
index 4833c7659a..91165b73db 100644
--- a/consensus/core/src/api/mod.rs
+++ b/consensus/core/src/api/mod.rs
@@ -157,7 +157,12 @@ pub trait ConsensusApi: Send + Sync {
         unimplemented!()
     }

-    fn get_virtual_chain_from_block(&self, hash: Hash) -> ConsensusResult<ChainPath> {
+    /// Gets the virtual chain paths from `low` to the `sink` hash, or until `chain_path_added_limit` is reached
+    ///
+    /// Note:
+    /// 1) `chain_path_added_limit` will populate the removed chain path fully, and then the added chain path, up to `chain_path_added_limit` amount of hashes.
+    /// 1.1) use `None` to impose no limit with optimized backward chain iteration, for better performance in cases where batching is not required.
+    fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option<usize>) -> ConsensusResult<ChainPath> {
         unimplemented!()
     }

@@ -297,7 +302,11 @@ pub trait ConsensusApi: Send + Sync {
     /// Returns acceptance data for a set of blocks belonging to the selected parent chain.
     ///
     /// See `self::get_virtual_chain`
-    fn get_blocks_acceptance_data(&self, hashes: &[Hash]) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
+    fn get_blocks_acceptance_data(
+        &self,
+        hashes: &[Hash],
+        merged_blocks_limit: Option<usize>,
+    ) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
         unimplemented!()
     }

diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs
index 8474a6864a..1731729a32 100644
--- a/consensus/src/consensus/mod.rs
+++ b/consensus/src/consensus/mod.rs
@@ -607,14 +607,26 @@ impl ConsensusApi for Consensus {
         self.config.is_nearly_synced(compact.timestamp, compact.daa_score)
     }

-    fn get_virtual_chain_from_block(&self, hash: Hash) -> ConsensusResult<ChainPath> {
-        // Calculate chain changes between the given hash and the
-        // sink. Note that we explicitly don't
+    fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option<usize>) -> ConsensusResult<ChainPath> {
+        // Calculate chain changes between the given `low` and the current sink hash (up to `limit` amount of block hashes).
+        // Note:
+        // 1) that we explicitly don't
         // do the calculation against the virtual itself so that we
         // won't later need to remove it from the result.
+        // 2) supplying `None` as `chain_path_added_limit` will result in the full chain path, with optimized performance.
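        // (Editorial sketch, not part of the patch: given these semantics, a client that wants the
        // full chain in bounded chunks could page through it by repeatedly advancing `low` to the
        // last added hash it received; `start_hash` and `batch_size` are assumed caller-side values,
        // and reorgs between calls are ignored here:
        //
        //     let mut low = start_hash;
        //     loop {
        //         let path = consensus.get_virtual_chain_from_block(low, Some(batch_size))?;
        //         // ... process path.removed and path.added ...
        //         match path.added.last() { Some(h) => low = *h, None => break }
        //     }
        //
        // When `low` reaches the sink, both vectors come back empty and the loop ends.)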
        let _guard = self.pruning_lock.blocking_read();
-        self.validate_block_exists(hash)?;
-        Ok(self.services.dag_traversal_manager.calculate_chain_path(hash, self.get_sink()))
+
+        // Verify that the block exists
+        self.validate_block_exists(low)?;
+
+        // Verify that the source is a chain ancestor of the queried block
+        self.services
+            .reachability_service
+            .is_chain_ancestor_of(self.get_source(), low)
+            .then_some(())
+            .ok_or(ConsensusError::General("the queried hash does not have source on its chain"))?;
+
+        Ok(self.services.dag_traversal_manager.calculate_chain_path(low, self.get_sink(), chain_path_added_limit))
     }

     /// Returns a Vec of header samples since genesis
@@ -914,11 +926,35 @@ impl ConsensusApi for Consensus {
         self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))
     }

-    fn get_blocks_acceptance_data(&self, hashes: &[Hash]) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
+    fn get_blocks_acceptance_data(
+        &self,
+        hashes: &[Hash],
+        merged_blocks_limit: Option<usize>,
+    ) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
+        // Note: `merged_blocks_limit` cuts off only once the sum of merged blocks across the supplied hashes
+        // exceeds the limit; it never truncates the acceptance data within a single queried hash. I.e., it has
+        // mergeset_size_limit granularity, which guarantees full acceptance data coverage per returned block.
+        if merged_blocks_limit.is_none() {
+            return hashes
+                .iter()
+                .copied()
+                .map(|hash| self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash)))
+                .collect::<ConsensusResult<Vec<_>>>();
+        }
+        let merged_blocks_limit = merged_blocks_limit.unwrap(); // we handle `is_none`, so we may unwrap
+        let mut num_of_merged_blocks = 0usize;
+
         hashes
             .iter()
             .copied()
-            .map(|hash| self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash)))
+            .map_while(|hash| {
+                let entry = self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash));
+                num_of_merged_blocks += entry.as_ref().map_or(0, |entry| entry.len());
+                if num_of_merged_blocks > merged_blocks_limit {
+                    None
+                } else {
+                    Some(entry)
+                }
+            })
             .collect::<ConsensusResult<Vec<_>>>()
     }

diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs
index dfb7394b80..88fee97bff 100644
--- a/consensus/src/pipeline/virtual_processor/processor.rs
+++ b/consensus/src/pipeline/virtual_processor/processor.rs
@@ -290,7 +290,7 @@ impl VirtualStateProcessor {
         assert_eq!(virtual_ghostdag_data.selected_parent, new_sink);

         let sink_multiset = self.utxo_multisets_store.get(new_sink).unwrap();
-        let chain_path = self.dag_traversal_manager.calculate_chain_path(prev_sink, new_sink);
+        let chain_path = self.dag_traversal_manager.calculate_chain_path(prev_sink, new_sink, None);
         let new_virtual_state = self
             .calculate_and_commit_virtual_state(
                 virtual_read,
diff --git a/consensus/src/processes/sync/mod.rs b/consensus/src/processes/sync/mod.rs
index 8472229682..3978913bae 100644
--- a/consensus/src/processes/sync/mod.rs
+++ b/consensus/src/processes/sync/mod.rs
@@ -111,7 +111,7 @@ impl<
         (blocks, highest_reached)
     }

-    fn find_highest_common_chain_block(&self, low: Hash, high: Hash) -> Hash {
+    pub fn find_highest_common_chain_block(&self, low: Hash, high: Hash) -> Hash {
         self.reachability_service
             .default_backward_chain_iterator(low)
             .find(|candidate| self.reachability_service.is_chain_ancestor_of(*candidate, high))
diff --git a/consensus/src/processes/traversal_manager.rs b/consensus/src/processes/traversal_manager.rs
index 3ae0aef5d7..23dc5c69f0 100644
--- a/consensus/src/processes/traversal_manager.rs
+++ b/consensus/src/processes/traversal_manager.rs
@@ -31,7 +31,7 @@ impl ChainPath {
-    pub fn calculate_chain_path(&self, from: Hash, to: Hash) -> ChainPath {
+    pub fn calculate_chain_path(&self, from: Hash, to: Hash, chain_path_added_limit: Option<usize>) -> ChainPath {
         let mut removed = Vec::new();
         let mut common_ancestor = from;
         for current in self.reachability_service.default_backward_chain_iterator(from) {
@@ -42,9 +42,20 @@ impl
diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs
             TxRemovalReason::Muted => {}
             TxRemovalReason::DoubleSpend => match removed_transactions.len() {
                 0 => {}
-                1 => warn!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info),
-                n => warn!(
+                1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info),
+                n => debug!(
                     "Removed {} transactions ({}): {}{}",
                     n, reason,
diff --git a/rpc/grpc/core/proto/rpc.proto b/rpc/grpc/core/proto/rpc.proto
index 8c3e7b3b25..e218681b65 100644
--- a/rpc/grpc/core/proto/rpc.proto
+++ b/rpc/grpc/core/proto/rpc.proto
@@ -374,8 +374,13 @@ message GetSubnetworkResponseMessage{
   RPCError error = 1000;
 }

-// GetVirtualChainFromBlockRequestMessage requests the virtual selected
-// parent chain from some startHash to this kaspad's current virtual
+/// GetVirtualChainFromBlockRequestMessage requests the virtual selected
+/// parent chain from some startHash to this kaspad's current virtual
+/// Note:
+/// this call batches the response to:
+/// a. the network's `mergeset size limit * 10` amount of added chain blocks, if `includeAcceptedTransactionIds = false`
+/// b. or `mergeset size limit * 10` amount of merged blocks, if `includeAcceptedTransactionIds = true`
+/// c. it does not batch the removed chain blocks, only the added ones.
 message GetVirtualChainFromBlockRequestMessage{
   string startHash = 1;
   bool includeAcceptedTransactionIds = 2;
diff --git a/rpc/service/src/converter/consensus.rs b/rpc/service/src/converter/consensus.rs
index a5e5d3b51e..c744300e52 100644
--- a/rpc/service/src/converter/consensus.rs
+++ b/rpc/service/src/converter/consensus.rs
@@ -162,8 +162,9 @@ impl ConsensusConverter {
         &self,
         consensus: &ConsensusProxy,
         chain_path: &ChainPath,
+        merged_blocks_limit: Option<usize>,
     ) -> RpcResult<Vec<RpcAcceptedTransactionIds>> {
-        let acceptance_data = consensus.async_get_blocks_acceptance_data(chain_path.added.clone()).await.unwrap();
+        let acceptance_data = consensus.async_get_blocks_acceptance_data(chain_path.added.clone(), merged_blocks_limit).await.unwrap();
         Ok(chain_path
             .added
             .iter()
diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs
index 2c22fd6bb1..bdc2a95412 100644
--- a/rpc/service/src/service.rs
+++ b/rpc/service/src/service.rs
@@ -539,7 +539,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and
     ) -> RpcResult<SubmitTransactionResponse> {
         let allow_orphan = self.config.unsafe_rpc && request.allow_orphan;
         if !self.config.unsafe_rpc && request.allow_orphan {
-            warn!("SubmitTransaction RPC command called with AllowOrphan enabled while node in safe RPC mode -- switching to ForbidOrphan.");
+            debug!("SubmitTransaction RPC command called with AllowOrphan enabled while node in safe RPC mode -- switching to ForbidOrphan.");
         }

         let transaction: Transaction = request.transaction.try_into()?;
@@ -609,13 +609,26 @@ NOTE: This error usually indicates an RPC conversion error between the node and
         request: GetVirtualChainFromBlockRequest,
     ) -> RpcResult<GetVirtualChainFromBlockResponse> {
         let session = self.consensus_manager.consensus().session().await;
-        let virtual_chain = session.async_get_virtual_chain_from_block(request.start_hash).await?;
+
+        // batch_size is set to 10 times the mergeset_size_limit.
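        // (Arithmetic check, not part of the patch: working backwards from the figures given in
        // the next comment line, batch_size = mergeset_size_limit * 10 implies mergeset size
        // limits of 248 on a 10-bps network (248 * 10 = 2480) and 180 on mainnet (180 * 10 = 1800).)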
+        // this means batch_size is 2480 on 10 bps, and 1800 on mainnet.
+        // this bounds the number of merged blocks if include_accepted_transaction_ids = true,
+        // else it bounds the number of added pure chain blocks returned.
+        // Note: batch_size does not bound removed chain blocks, only added chain blocks.
+        let batch_size = (self.config.mergeset_size_limit * 10) as usize;
+        let mut virtual_chain_batch = session.async_get_virtual_chain_from_block(request.start_hash, Some(batch_size)).await?;
         let accepted_transaction_ids = if request.include_accepted_transaction_ids {
-            self.consensus_converter.get_virtual_chain_accepted_transaction_ids(&session, &virtual_chain).await?
+            let accepted_transaction_ids = self
+                .consensus_converter
+                .get_virtual_chain_accepted_transaction_ids(&session, &virtual_chain_batch, Some(batch_size))
+                .await?;
+            // bound the added chain blocks to the length of the accepted transaction ids, which is itself bounded by merged blocks
+            virtual_chain_batch.added = virtual_chain_batch.added[..accepted_transaction_ids.len()].to_vec();
+            accepted_transaction_ids
         } else {
             vec![]
         };
-        Ok(GetVirtualChainFromBlockResponse::new(virtual_chain.removed, virtual_chain.added, accepted_transaction_ids))
+        Ok(GetVirtualChainFromBlockResponse::new(virtual_chain_batch.removed, virtual_chain_batch.added, accepted_transaction_ids))
     }

     async fn get_block_count_call(
diff --git a/testing/integration/src/daemon_integration_tests.rs b/testing/integration/src/daemon_integration_tests.rs
index a923202942..460cf049c3 100644
--- a/testing/integration/src/daemon_integration_tests.rs
+++ b/testing/integration/src/daemon_integration_tests.rs
@@ -106,7 +106,13 @@ async fn daemon_mining_test() {
     assert_eq!(dag_info.sink, last_block_hash.unwrap());

     // Check that acceptance data contains the expected coinbase tx ids
-    let vc = rpc_client2.get_virtual_chain_from_block(kaspa_consensus::params::SIMNET_GENESIS.hash, true).await.unwrap();
+    let vc = rpc_client2
+        .get_virtual_chain_from_block(
+            kaspa_consensus::params::SIMNET_GENESIS.hash, //
+            true,
+        )
+        .await
+        .unwrap();
     assert_eq!(vc.removed_chain_block_hashes.len(), 0);
     assert_eq!(vc.added_chain_block_hashes.len(), 10);
     assert_eq!(vc.accepted_transaction_ids.len(), 10);

From 4d03153f9cb3d7e6674ac7c28c1956c0f4b75a03 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Fri, 20 Sep 2024 01:58:28 +0300
Subject: [PATCH 02/31] A few CLI rpc query fixes (#563)

---
 cli/src/modules/rpc.rs   |  5 +++--
 rpc/core/src/model/tx.rs | 34 +++++++++++++++++++++++++++++++---
 2 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/cli/src/modules/rpc.rs b/cli/src/modules/rpc.rs
index f96cb5b612..cf6bc6bd20 100644
--- a/cli/src/modules/rpc.rs
+++ b/cli/src/modules/rpc.rs
@@ -114,7 +114,8 @@ impl Rpc {
                 }
                 let hash = argv.remove(0);
                 let hash = RpcHash::from_hex(hash.as_str())?;
-                let result = rpc.get_block_call(None, GetBlockRequest { hash, include_transactions: true }).await?;
+                let include_transactions = argv.first().and_then(|x| x.parse::<bool>().ok()).unwrap_or(true);
+                let result = rpc.get_block_call(None, GetBlockRequest { hash, include_transactions }).await?;
                 self.println(&ctx, result);
             }
             // RpcApiOps::GetSubnetwork => {
@@ -126,7 +127,7 @@ impl Rpc {
                     return Err(Error::custom("Missing startHash argument"));
                 };
                 let start_hash = RpcHash::from_hex(argv.remove(0).as_str())?;
-                let include_accepted_transaction_ids = argv.remove(0).parse::<bool>().unwrap_or_default();
+                let include_accepted_transaction_ids = argv.first().and_then(|x| x.parse::<bool>().ok()).unwrap_or_default();
                 let result = rpc
.get_virtual_chain_from_block_call( None, diff --git a/rpc/core/src/model/tx.rs b/rpc/core/src/model/tx.rs index c2b91f1870..0c17e26f53 100644 --- a/rpc/core/src/model/tx.rs +++ b/rpc/core/src/model/tx.rs @@ -4,7 +4,7 @@ use kaspa_consensus_core::tx::{ ScriptPublicKey, ScriptVec, TransactionId, TransactionIndexType, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry, }; -use kaspa_utils::serde_bytes_fixed_ref; +use kaspa_utils::{hex::ToHex, serde_bytes_fixed_ref}; use serde::{Deserialize, Serialize}; use workflow_serializer::prelude::*; @@ -131,7 +131,7 @@ impl Deserializer for RpcTransactionOutpoint { } /// Represents a Kaspa transaction input -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcTransactionInput { pub previous_outpoint: RpcTransactionOutpoint, @@ -142,6 +142,18 @@ pub struct RpcTransactionInput { pub verbose_data: Option, } +impl std::fmt::Debug for RpcTransactionInput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RpcTransactionInput") + .field("previous_outpoint", &self.previous_outpoint) + .field("signature_script", &self.signature_script.to_hex()) + .field("sequence", &self.sequence) + .field("sig_op_count", &self.sig_op_count) + .field("verbose_data", &self.verbose_data) + .finish() + } +} + impl From for RpcTransactionInput { fn from(input: TransactionInput) -> Self { Self { @@ -277,7 +289,7 @@ impl Deserializer for RpcTransactionOutputVerboseData { } /// Represents a Kaspa transaction -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcTransaction { pub version: u16, @@ -292,6 +304,22 @@ pub struct RpcTransaction { pub verbose_data: Option, } +impl std::fmt::Debug for RpcTransaction { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RpcTransaction") + .field("version", &self.version) + .field("lock_time", &self.lock_time) + .field("subnetwork_id", &self.subnetwork_id) + .field("gas", &self.gas) + .field("payload", &self.payload.to_hex()) + .field("mass", &self.mass) + .field("inputs", &self.inputs) // Inputs and outputs are placed purposely at the end for better debug visibility + .field("outputs", &self.outputs) + .field("verbose_data", &self.verbose_data) + .finish() + } +} + impl Serializer for RpcTransaction { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { store!(u16, &1, writer)?; From 9fae376500c3b7bde4ac0d0f03f15d47a4d6f12c Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 20 Sep 2024 13:31:55 +0300 Subject: [PATCH 03/31] Deploy linux binary without musl in its name + various minor miscellaneous things towards v0.15.2 (#564) * remove musl from linux binary name * remove simpa from win and osx builds in order to be consistent with linux build * safe eqv optimization: use inplace truncate (tested) --- .github/workflows/deploy.yaml | 8 ++------ rpc/service/src/service.rs | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml index 8f46cb1fe5..537eeef898 100644 --- a/.github/workflows/deploy.yaml +++ b/.github/workflows/deploy.yaml @@ -65,8 +65,8 @@ jobs: cp target/x86_64-unknown-linux-musl/release/kaspad bin/ cp target/x86_64-unknown-linux-musl/release/rothschild bin/ cp target/x86_64-unknown-linux-musl/release/kaspa-wallet bin/ - archive="bin/rusty-kaspa-${{ github.event.release.tag_name 
}}-linux-musl-amd64.zip" - asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-musl-amd64.zip" + archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip" + asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip" zip -r "${archive}" ./bin/* echo "archive=${archive}" >> $GITHUB_ENV echo "asset_name=${asset_name}" >> $GITHUB_ENV @@ -76,12 +76,10 @@ jobs: shell: bash run: | cargo build --bin kaspad --release - cargo build --bin simpa --release cargo build --bin rothschild --release cargo build --bin kaspa-wallet --release mkdir bin || true cp target/release/kaspad.exe bin/ - cp target/release/simpa.exe bin/ cp target/release/rothschild.exe bin/ cp target/release/kaspa-wallet.exe bin/ archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip" @@ -94,12 +92,10 @@ jobs: if: runner.os == 'macOS' run: | cargo build --bin kaspad --release - cargo build --bin simpa --release cargo build --bin rothschild --release cargo build --bin kaspa-wallet --release mkdir bin || true cp target/release/kaspad bin/ - cp target/release/simpa bin/ cp target/release/rothschild bin/ cp target/release/kaspa-wallet bin/ archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip" diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs index bdc2a95412..d75ff770b0 100644 --- a/rpc/service/src/service.rs +++ b/rpc/service/src/service.rs @@ -623,7 +623,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and .get_virtual_chain_accepted_transaction_ids(&session, &virtual_chain_batch, Some(batch_size)) .await?; // bound added to the length of the accepted transaction ids, which is bounded by merged blocks - virtual_chain_batch.added = virtual_chain_batch.added[..accepted_transaction_ids.len()].to_vec(); + virtual_chain_batch.added.truncate(accepted_transaction_ids.len()); accepted_transaction_ids } else { vec![] From d66cbe3300bb54adfbbf38327881b20b2909d3ba Mon Sep 17 00:00:00 2001 From: demisrael <81626907+demisrael@users.noreply.github.com> Date: Mon, 23 Sep 2024 08:02:58 +0300 Subject: [PATCH 04/31] rothschild: donate funds to external address with custom priority fee (#482) * rothschild: donate funds to external address Signed-off-by: Dmitry Perchanov * rothschild: Append priority fee to txs. 
Signed-off-by: Dmitry Perchanov * rothschild: add option to choose and randomize fee Signed-off-by: Dmitry Perchanov * rothschild: address clippy formatting issues Signed-off-by: Dmitry Perchanov --------- Signed-off-by: Dmitry Perchanov Signed-off-by: Dmitry Perchanov Co-authored-by: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Co-authored-by: Dmitry Perchanov --- rothschild/src/main.rs | 76 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 70 insertions(+), 6 deletions(-) diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index d303f1da05..35d08493bb 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -17,7 +17,10 @@ use kaspa_rpc_core::{api::rpc::RpcApi, notify::mode::NotificationMode, RpcUtxoEn use kaspa_txscript::pay_to_address_script; use parking_lot::Mutex; use rayon::prelude::*; -use secp256k1::{rand::thread_rng, Keypair}; +use secp256k1::{ + rand::{thread_rng, Rng}, + Keypair, +}; use tokio::time::{interval, MissedTickBehavior}; const DEFAULT_SEND_AMOUNT: u64 = 10 * SOMPI_PER_KASPA; @@ -40,6 +43,9 @@ pub struct Args { pub rpc_server: String, pub threads: u8, pub unleashed: bool, + pub addr: Option, + pub priority_fee: u64, + pub randomize_fee: bool, } impl Args { @@ -51,6 +57,9 @@ impl Args { rpc_server: m.get_one::("rpcserver").cloned().unwrap_or("localhost:16210".to_owned()), threads: m.get_one::("threads").cloned().unwrap(), unleashed: m.get_one::("unleashed").cloned().unwrap_or(false), + addr: m.get_one::("addr").cloned(), + priority_fee: m.get_one::("priority-fee").cloned().unwrap_or(0), + randomize_fee: m.get_one::("randomize-fee").cloned().unwrap_or(false), } } } @@ -85,6 +94,25 @@ pub fn cli() -> Command { .help("The number of threads to use for TX generation. Set to 0 to use 1 thread per core. 
Default is 2."), ) .arg(Arg::new("unleashed").long("unleashed").action(ArgAction::SetTrue).hide(true).help("Allow higher TPS")) + .arg(Arg::new("addr").long("to-addr").short('a').value_name("addr").help("address to send to")) + .arg( + Arg::new("priority-fee") + .long("priority-fee") + .short('f') + .value_name("priority-fee") + .default_value("0") + .value_parser(clap::value_parser!(u64)) + .help("Transaction priority fee"), + ) + .arg( + Arg::new("randomize-fee") + .long("randomize-fee") + .short('r') + .value_name("randomize-fee") + .action(ArgAction::SetTrue) + .default_value("false") + .help("Randomize transaction priority fee"), + ) } async fn new_rpc_client(subscription_context: &SubscriptionContext, address: &str) -> GrpcClient { @@ -111,6 +139,11 @@ struct ClientPoolArg { utxos_len: usize, } +struct TxsFeeConfig { + priority_fee: u64, + randomize_fee: bool, +} + #[tokio::main] async fn main() { kaspa_core::log::init_logger(None, ""); @@ -150,9 +183,31 @@ async fn main() { let kaspa_addr = Address::new(ADDRESS_PREFIX, ADDRESS_VERSION, &schnorr_key.x_only_public_key().0.serialize()); + let kaspa_to_addr = args.addr.as_ref().map_or_else(|| kaspa_addr.clone(), |addr_str| Address::try_from(addr_str.clone()).unwrap()); + + let fee_config = TxsFeeConfig { priority_fee: args.priority_fee, randomize_fee: args.randomize_fee }; + rayon::ThreadPoolBuilder::new().num_threads(args.threads as usize).build_global().unwrap(); - info!("Using Rothschild with private key {} and address {}", schnorr_key.display_secret(), String::from(&kaspa_addr)); + let mut log_message = format!( + "Using Rothschild with:\n\ + \tprivate key: {}\n\ + \tfrom address: {}", + schnorr_key.display_secret(), + String::from(&kaspa_addr) + ); + if args.addr.is_some() { + log_message.push_str(&format!("\n\tto address: {}", String::from(&kaspa_to_addr))); + } + if args.priority_fee != 0 { + log_message.push_str(&format!( + "\n\tpriority fee: {} SOMPS {}", + fee_config.priority_fee, + if fee_config.randomize_fee { "[randomize]" } else { "" } + )); + } + info!("{}", log_message); + let info = rpc_client.get_block_dag_info().await.unwrap(); let coinbase_maturity = match info.network.suffix { Some(11) => TESTNET11_PARAMS.coinbase_maturity, @@ -249,13 +304,14 @@ async fn main() { let has_funds = maybe_send_tx( txs_to_send, &tx_sender, - kaspa_addr.clone(), + kaspa_to_addr.clone(), &mut utxos, &mut pending, schnorr_key, stats.clone(), maximize_inputs, &mut next_available_utxo_index, + &fee_config, ) .await; if !has_funds { @@ -369,6 +425,7 @@ async fn maybe_send_tx( stats: Arc>, maximize_inputs: bool, next_available_utxo_index: &mut usize, + fee_config: &TxsFeeConfig, ) -> bool { let num_outs = if maximize_inputs { 1 } else { 2 }; @@ -377,7 +434,7 @@ async fn maybe_send_tx( let selected_utxos_groups = (0..txs_to_send) .map(|_| { let (selected_utxos, selected_amount) = - select_utxos(utxos, DEFAULT_SEND_AMOUNT, num_outs, maximize_inputs, next_available_utxo_index); + select_utxos(utxos, DEFAULT_SEND_AMOUNT, num_outs, maximize_inputs, next_available_utxo_index, fee_config); if selected_amount == 0 { return None; } @@ -473,10 +530,12 @@ fn select_utxos( num_outs: u64, maximize_utxos: bool, next_available_utxo_index: &mut usize, + fee_config: &TxsFeeConfig, ) -> (Vec<(TransactionOutpoint, UtxoEntry)>, u64) { const MAX_UTXOS: usize = 84; let mut selected_amount: u64 = 0; let mut selected = Vec::new(); + let mut rng = thread_rng(); while next_available_utxo_index < &mut utxos.len() { let (outpoint, entry) = 
utxos[*next_available_utxo_index].clone(); @@ -484,11 +543,16 @@ fn select_utxos( selected.push((outpoint, entry)); let fee = required_fee(selected.len(), num_outs); + let priority_fee = if fee_config.randomize_fee && fee_config.priority_fee > 0 { + rng.gen_range(0..fee_config.priority_fee) + } else { + fee_config.priority_fee + }; *next_available_utxo_index += 1; - if selected_amount >= min_amount + fee && (!maximize_utxos || selected.len() == MAX_UTXOS) { - return (selected, selected_amount - fee); + if selected_amount >= min_amount + fee + priority_fee && (!maximize_utxos || selected.len() == MAX_UTXOS) { + return (selected, selected_amount - fee - priority_fee); } if selected.len() > MAX_UTXOS { From 4bfa392922fff59c1e248b7b141d07b35f3294bc Mon Sep 17 00:00:00 2001 From: Maxim <59533214+biryukovmaxim@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:45:43 +0400 Subject: [PATCH 05/31] fix wrong combiner condition (#567) --- wallet/pskt/src/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wallet/pskt/src/utils.rs b/wallet/pskt/src/utils.rs index 28b7959ed6..cab2119512 100644 --- a/wallet/pskt/src/utils.rs +++ b/wallet/pskt/src/utils.rs @@ -6,7 +6,7 @@ where V: Eq + Clone, K: Ord + Clone, { - if lhs.len() > rhs.len() { + if lhs.len() >= rhs.len() { if let Some((field, rhs, lhs)) = rhs.iter().map(|(k, v)| (k, v, lhs.get(k))).find(|(_, v, rhs_v)| rhs_v.is_some_and(|rv| rv != *v)) { From 200b8ea63a6786d784713de092810a6854b81880 Mon Sep 17 00:00:00 2001 From: aspect Date: Fri, 27 Sep 2024 02:09:57 +0300 Subject: [PATCH 06/31] fix wRPC json notification format (#571) --- rpc/wrpc/server/src/connection.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/wrpc/server/src/connection.rs b/rpc/wrpc/server/src/connection.rs index 97a4e8adce..e118d161d0 100644 --- a/rpc/wrpc/server/src/connection.rs +++ b/rpc/wrpc/server/src/connection.rs @@ -134,7 +134,7 @@ impl Connection { { match encoding { Encoding::Borsh => workflow_rpc::server::protocol::borsh::create_serialized_notification_message(op, msg), - Encoding::SerdeJson => workflow_rpc::server::protocol::borsh::create_serialized_notification_message(op, msg), + Encoding::SerdeJson => workflow_rpc::server::protocol::serde_json::create_serialized_notification_message(op, msg), } } } From 035a394d144bac7f142c9556f65da4612c24a8fe Mon Sep 17 00:00:00 2001 From: aspect Date: Fri, 27 Sep 2024 05:59:59 +0300 Subject: [PATCH 07/31] Documentation updates (#570) * docs * Export ConsensusSessionOwned * add CI pass to run `cargo doc` * module rust docs * lints * fix typos * replace glob import terminology with "re-exports" * cleanup --- .github/workflows/ci.yaml | 3 + cli/src/modules/connect.rs | 4 +- components/consensusmanager/src/lib.rs | 3 +- components/consensusmanager/src/session.rs | 9 +-- consensus/client/src/error.rs | 2 + consensus/client/src/hash.rs | 7 +++ consensus/client/src/header.rs | 11 ++++ consensus/client/src/input.rs | 13 ++++ consensus/client/src/lib.rs | 14 +++++ consensus/client/src/outpoint.rs | 48 +++++++++----- consensus/client/src/output.rs | 13 ++++ consensus/client/src/result.rs | 2 + consensus/client/src/serializable/mod.rs | 22 +++++++ consensus/client/src/serializable/numeric.rs | 8 ++- consensus/client/src/serializable/string.rs | 5 +- consensus/client/src/sign.rs | 4 ++ consensus/client/src/transaction.rs | 7 +++ consensus/client/src/utils.rs | 6 ++ consensus/client/src/utxo.rs | 19 ++++++ consensus/core/src/api/mod.rs | 2 +- consensus/core/src/config/bps.rs | 2 +- 
consensus/core/src/lib.rs | 6 ++ consensus/core/src/network.rs | 13 ++++ consensus/core/src/tx.rs | 9 +++ consensus/src/processes/ghostdag/protocol.rs | 2 +- .../src/processes/reachability/interval.rs | 4 +- crypto/addresses/src/lib.rs | 24 ++++++- crypto/txscript/src/opcodes/mod.rs | 2 +- indexes/utxoindex/src/core/errors.rs | 4 +- indexes/utxoindex/src/index.rs | 5 +- indexes/utxoindex/src/update_container.rs | 4 +- metrics/core/src/data.rs | 59 ----------------- mining/src/feerate/fee_estimation.ipynb | 10 +-- .../src/mempool/model/frontier/search_tree.rs | 5 +- notify/src/address/tracker.rs | 4 +- notify/src/notifier.rs | 4 +- .../flows/src/flowcontext/transactions.rs | 2 +- rpc/core/src/api/connection.rs | 4 ++ rpc/core/src/api/ctl.rs | 4 ++ rpc/core/src/api/mod.rs | 4 ++ rpc/core/src/api/notifications.rs | 4 ++ rpc/core/src/api/ops.rs | 4 ++ rpc/core/src/api/rpc.rs | 8 ++- rpc/core/src/convert/block.rs | 2 + rpc/core/src/convert/mod.rs | 4 ++ rpc/core/src/convert/notification.rs | 2 + rpc/core/src/convert/scope.rs | 2 + rpc/core/src/convert/tx.rs | 2 + rpc/core/src/convert/utxo.rs | 2 + rpc/core/src/error.rs | 4 ++ rpc/core/src/lib.rs | 14 +++++ rpc/core/src/model/mod.rs | 3 + rpc/core/src/notify/mod.rs | 4 ++ rpc/core/src/wasm/convert.rs | 4 ++ rpc/core/src/wasm/message.rs | 4 ++ rpc/core/src/wasm/mod.rs | 2 + rpc/grpc/client/src/lib.rs | 2 +- rpc/grpc/core/src/convert/message.rs | 2 +- rpc/wrpc/client/src/client.rs | 13 ++-- rpc/wrpc/client/src/error.rs | 2 + rpc/wrpc/client/src/lib.rs | 16 +++++ rpc/wrpc/client/src/node.rs | 2 + rpc/wrpc/client/src/parse.rs | 2 + rpc/wrpc/client/src/prelude.rs | 2 + rpc/wrpc/client/src/resolver.rs | 25 +++++++- rpc/wrpc/client/src/result.rs | 2 + rpc/wrpc/wasm/src/client.rs | 9 ++- rpc/wrpc/wasm/src/lib.rs | 4 ++ rpc/wrpc/wasm/src/notify.rs | 4 ++ rpc/wrpc/wasm/src/resolver.rs | 4 ++ simpa/src/main.rs | 2 +- utils/src/lib.rs | 6 ++ utils/src/option.rs | 2 +- wallet/bip32/src/address_type.rs | 5 ++ wallet/bip32/src/lib.rs | 2 + wallet/bip32/src/private_key.rs | 1 + wallet/bip32/src/public_key.rs | 2 +- wallet/core/src/account/descriptor.rs | 5 ++ wallet/core/src/account/kind.rs | 5 ++ wallet/core/src/account/pskb.rs | 5 ++ wallet/core/src/api/message.rs | 21 ++++--- wallet/core/src/api/mod.rs | 2 + wallet/core/src/api/traits.rs | 26 +++++++- wallet/core/src/api/transport.rs | 4 +- wallet/core/src/compat/mod.rs | 4 ++ wallet/core/src/cryptobox.rs | 13 +++- wallet/core/src/events.rs | 6 ++ wallet/core/src/factory.rs | 3 + wallet/core/src/lib.rs | 63 +++++++------------ wallet/core/src/message.rs | 1 + wallet/core/src/metrics.rs | 29 +++------ wallet/core/src/prelude.rs | 3 +- wallet/core/src/rpc.rs | 8 ++- wallet/core/src/settings.rs | 5 ++ wallet/core/src/tx/payment.rs | 10 +++ wallet/core/src/utxo/processor.rs | 4 +- wallet/core/src/wallet/api.rs | 10 +-- wallet/core/src/wallet/args.rs | 1 - wallet/core/src/wallet/maps.rs | 1 + wallet/core/src/wallet/mod.rs | 17 +++-- wallet/core/src/wasm/cryptobox.rs | 2 +- wallet/core/src/wasm/signer.rs | 4 +- wallet/keys/src/derivation/gen0/mod.rs | 2 +- wallet/keys/src/derivation/gen1/mod.rs | 3 +- wallet/keys/src/derivation/mod.rs | 4 ++ wallet/keys/src/derivation_path.rs | 6 ++ wallet/keys/src/keypair.rs | 8 ++- wallet/keys/src/lib.rs | 7 +++ wallet/keys/src/prelude.rs | 4 ++ wallet/keys/src/privatekey.rs | 6 +- wallet/keys/src/privkeygen.rs | 4 ++ wallet/keys/src/pubkeygen.rs | 4 ++ wallet/keys/src/publickey.rs | 4 +- wallet/keys/src/secret.rs | 4 +- wallet/keys/src/types.rs | 2 +- wallet/keys/src/xprv.rs | 
4 ++ wallet/keys/src/xpub.rs | 4 ++ wallet/pskt/src/bundle.rs | 5 ++ wallet/pskt/src/convert.rs | 6 ++ wallet/pskt/src/error.rs | 2 + wallet/pskt/src/global.rs | 2 + wallet/pskt/src/input.rs | 2 + wallet/pskt/src/output.rs | 2 + wallet/pskt/src/pskt.rs | 21 +++++++ wallet/pskt/src/role.rs | 2 + wallet/pskt/src/utils.rs | 2 + wasm/src/lib.rs | 31 ++++++--- 127 files changed, 701 insertions(+), 239 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 693266885c..49fe4e4637 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -126,6 +126,9 @@ jobs: - name: Run cargo doc tests with features=no-asm on kaspa-hashes run: cargo test --doc --release -p kaspa-hashes --features=no-asm + - name: Run cargo doc + run: cargo doc --release --no-deps --workspace + # test-release: # name: Test Suite Release # runs-on: ${{ matrix.os }} diff --git a/cli/src/modules/connect.rs b/cli/src/modules/connect.rs index 024f7e6934..a755915d4f 100644 --- a/cli/src/modules/connect.rs +++ b/cli/src/modules/connect.rs @@ -14,11 +14,11 @@ impl Connect { let (is_public, url) = match arg_or_server_address.as_deref() { Some("public") => { tprintln!(ctx, "Connecting to a public node"); - (true, Resolver::default().fetch(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?.url) + (true, Resolver::default().get_url(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?) } None => { tprintln!(ctx, "No server set, connecting to a public node"); - (true, Resolver::default().fetch(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?.url) + (true, Resolver::default().get_url(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?) } Some(url) => { (false, wrpc_client.parse_url_with_network_type(url.to_string(), network_id.into()).map_err(|e| e.to_string())?) diff --git a/components/consensusmanager/src/lib.rs b/components/consensusmanager/src/lib.rs index 54bdda40b9..6d31653aab 100644 --- a/components/consensusmanager/src/lib.rs +++ b/components/consensusmanager/src/lib.rs @@ -9,7 +9,8 @@ mod session; pub use batch::BlockProcessingBatch; pub use session::{ - spawn_blocking, ConsensusInstance, ConsensusProxy, ConsensusSessionBlocking, SessionLock, SessionReadGuard, SessionWriteGuard, + spawn_blocking, ConsensusInstance, ConsensusProxy, ConsensusSessionBlocking, ConsensusSessionOwned, SessionLock, SessionReadGuard, + SessionWriteGuard, }; /// Consensus controller trait. Includes methods required to start/stop/control consensus, but which should not diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs index 2643739ee3..8e0c6e9335 100644 --- a/components/consensusmanager/src/session.rs +++ b/components/consensusmanager/src/session.rs @@ -91,7 +91,7 @@ impl ConsensusInstance { /// Returns an unguarded *blocking* consensus session. There's no guarantee that data will not be pruned between /// two sequential consensus calls. This session doesn't hold the consensus pruning lock, so it should - /// be preferred upon [`session_blocking`] when data consistency is not important. + /// be preferred upon [`session_blocking()`](Self::session_blocking) when data consistency is not important. pub fn unguarded_session_blocking(&self) -> ConsensusSessionBlocking<'static> { ConsensusSessionBlocking::new_without_session_guard(self.consensus.clone()) } @@ -100,7 +100,7 @@ impl ConsensusInstance { /// that consensus state is consistent between operations, that is, no pruning was performed between the calls. 
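    /// (For instance, an editorial sketch of typical use, assuming a `ConsensusInstance` named `instance`:
    /// `let session = instance.session().await;` followed by several `session.async_*` queries, all of
    /// which then observe a single, un-pruned, consistent state.)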
/// The returned object is an *owned* consensus session type which can be cloned and shared across threads. /// The sharing ability is useful for spawning blocking operations on a different thread using the same - /// session object, see [`ConsensusSessionOwned::spawn_blocking`]. The caller is responsible to make sure + /// session object, see [`ConsensusSessionOwned::spawn_blocking()`](ConsensusSessionOwned::spawn_blocking). The caller is responsible to make sure /// that the overall lifetime of this session is not too long (~2 seconds max) pub async fn session(&self) -> ConsensusSessionOwned { let g = self.session_lock.read_owned().await; @@ -109,7 +109,7 @@ impl ConsensusInstance { /// Returns an unguarded consensus session. There's no guarantee that data will not be pruned between /// two sequential consensus calls. This session doesn't hold the consensus pruning lock, so it should - /// be preferred upon [`session`] when data consistency is not important. + /// be preferred upon [`session()`](Self::session) when data consistency is not important. pub fn unguarded_session(&self) -> ConsensusSessionOwned { ConsensusSessionOwned::new_without_session_guard(self.consensus.clone()) } @@ -139,7 +139,8 @@ impl Deref for ConsensusSessionBlocking<'_> { } /// An *owned* consensus session type which can be cloned and shared across threads. -/// See method `spawn_blocking` within for context on the usefulness of this type +/// See method `spawn_blocking` within for context on the usefulness of this type. +/// Please note - you must use [`ConsensusProxy`] type alias instead of this struct. #[derive(Clone)] pub struct ConsensusSessionOwned { _session_guard: Option, diff --git a/consensus/client/src/error.rs b/consensus/client/src/error.rs index e0aab2156c..e632f517d5 100644 --- a/consensus/client/src/error.rs +++ b/consensus/client/src/error.rs @@ -1,3 +1,5 @@ +//! The [`Error`](enum@Error) enum used by this crate + use thiserror::Error; use wasm_bindgen::{JsError, JsValue}; use workflow_wasm::jserror::JsErrorData; diff --git a/consensus/client/src/hash.rs b/consensus/client/src/hash.rs index 4402cfb1b5..1577689a67 100644 --- a/consensus/client/src/hash.rs +++ b/consensus/client/src/hash.rs @@ -1,3 +1,10 @@ +//! +//! WASM bindings for transaction hashers: [`TransactionSigningHash`](native::TransactionSigningHash) +//! and [`TransactionSigningHashECDSA`](native::TransactionSigningHashECDSA). +//! + +#![allow(non_snake_case)] + use crate::imports::*; use crate::result::Result; use kaspa_hashes as native; diff --git a/consensus/client/src/header.rs b/consensus/client/src/header.rs index 56fd92845e..6f04a73c43 100644 --- a/consensus/client/src/header.rs +++ b/consensus/client/src/header.rs @@ -1,3 +1,9 @@ +//! +//! Implementation of the Block [`Header`] struct. +//! + +#![allow(non_snake_case)] + use crate::error::Error; use js_sys::{Array, Object}; use kaspa_consensus_core::hashing; @@ -59,10 +65,15 @@ export interface IRawHeader { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type definition for the Header-like struct: `Header | IHeader | IRawHeader`. 
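// (Sketch, not part of the patch: the `typescript_type` externs added below follow the standard
// wasm-bindgen pattern. A minimal illustration with a hypothetical name:
//
//     #[wasm_bindgen]
//     extern "C" {
//         #[wasm_bindgen(typescript_type = "IFoo | Foo")]
//         pub type FooT; // hypothetical
//     }
//
// `FooT` is a thin wrapper that derefs to `JsValue`; its only role is to surface the custom
// TypeScript type "IFoo | Foo" in the generated .d.ts, while Rust treats it as an opaque handle.)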
+ /// + /// @category Consensus #[wasm_bindgen(typescript_type = "Header | IHeader | IRawHeader")] pub type HeaderT; } +/// Kaspa Block Header +/// /// @category Consensus #[derive(Clone, Debug, Serialize, Deserialize, CastFromJs)] #[serde(rename_all = "camelCase")] pub struct Header { diff --git a/consensus/client/src/input.rs b/consensus/client/src/input.rs index 736696bfae..a5018199d5 100644 --- a/consensus/client/src/input.rs +++ b/consensus/client/src/input.rs @@ -1,3 +1,9 @@ +//! +//! Implementation of the client-side [`TransactionInput`] struct used by the client-side [`Transaction`] struct. +//! + +#![allow(non_snake_case)] + use crate::imports::*; use crate::result::Result; use crate::TransactionOutpoint; @@ -33,14 +39,21 @@ export interface ITransactionInputVerboseData { } #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type representing `ITransactionInput | TransactionInput` + /// @category Consensus #[wasm_bindgen(typescript_type = "ITransactionInput | TransactionInput")] pub type TransactionInputT; + /// WASM (TypeScript) type representing `ITransactionInput[] | TransactionInput[]` + /// @category Consensus #[wasm_bindgen(typescript_type = "(ITransactionInput | TransactionInput)[]")] pub type TransactionInputArrayAsArgT; + /// WASM (TypeScript) type representing `TransactionInput[]` + /// @category Consensus #[wasm_bindgen(typescript_type = "TransactionInput[]")] pub type TransactionInputArrayAsResultT; } +/// Inner type used by [`TransactionInput`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionInputInner { diff --git a/consensus/client/src/lib.rs b/consensus/client/src/lib.rs index eb482eab16..3afae2f78b 100644 --- a/consensus/client/src/lib.rs +++ b/consensus/client/src/lib.rs @@ -1,3 +1,17 @@ +//! +//! # Client-side consensus primitives. +//! +//! This crate offers client-side primitives mirroring the consensus layer of the Kaspa p2p node. +//! It declares structs such as [`Transaction`], [`TransactionInput`], [`TransactionOutput`], +//! [`TransactionOutpoint`], [`UtxoEntry`], and [`UtxoEntryReference`] +//! that are used by the Wallet subsystem as well as WASM bindings. +//! +//! Unlike raw consensus primitives (used for high-performance DAG processing) the primitives +//! offered in this crate are designed to be used in client-side applications. Their internal +//! data is typically wrapped into `Arc<Mutex<T>>`, allowing for easy sharing between +//! async / threaded environments and WASM bindings. +//! + pub mod error; mod imports; mod input; diff --git a/consensus/client/src/outpoint.rs b/consensus/client/src/outpoint.rs index 06be53f6aa..a9b39f5e4f 100644 --- a/consensus/client/src/outpoint.rs +++ b/consensus/client/src/outpoint.rs @@ -1,3 +1,11 @@ +//! +//! Implementation of the client-side [`TransactionOutpoint`] used by the [`TransactionInput`] struct. +//!
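// [Editor's sketch: the consensus/client/src/lib.rs doc above describes client-side
// primitives whose data is wrapped in `Arc<Mutex<T>>` so cheap clones can be shared
// across threads and WASM bindings. A minimal self-contained illustration of that
// inner/outer pattern follows; `Outpoint`/`OutpointInner` are hypothetical stand-ins,
// not the crate's actual types.]
use std::sync::{Arc, Mutex};
use std::thread;

struct OutpointInner {
    index: u32,
}

#[derive(Clone)]
struct Outpoint {
    inner: Arc<Mutex<OutpointInner>>, // clones share the same inner data
}

impl Outpoint {
    fn new(index: u32) -> Self {
        Self { inner: Arc::new(Mutex::new(OutpointInner { index })) }
    }
    fn index(&self) -> u32 {
        self.inner.lock().unwrap().index
    }
}

fn main() {
    let outpoint = Outpoint::new(7);
    let clone = outpoint.clone(); // cheap: clones the Arc, not the data
    assert_eq!(thread::spawn(move || clone.index()).join().unwrap(), outpoint.index());
}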
+ +#![allow(non_snake_case)] + +use cfg_if::cfg_if; + use crate::imports::*; use crate::result::Result; @@ -14,6 +22,7 @@ export interface ITransactionOutpoint { } "#; +/// Inner type used by [`TransactionOutpoint`] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Ord, PartialOrd)] #[serde(rename_all = "camelCase")] pub struct TransactionOutpointInner { @@ -110,26 +119,31 @@ impl TransactionOutpoint { } } -#[cfg_attr(feature = "wasm32-sdk", wasm_bindgen)] -impl TransactionOutpoint { - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(constructor))] - pub fn ctor(transaction_id: TransactionId, index: u32) -> TransactionOutpoint { - Self { inner: Arc::new(TransactionOutpointInner { transaction_id, index }) } - } +cfg_if! { + if #[cfg(feature = "wasm32-sdk")] { - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(js_name = "getId"))] - pub fn id_string(&self) -> String { - format!("{}-{}", self.get_transaction_id_as_string(), self.get_index()) - } + #[wasm_bindgen] + impl TransactionOutpoint { + #[wasm_bindgen(constructor)] + pub fn ctor(transaction_id: TransactionId, index: u32) -> TransactionOutpoint { + Self { inner: Arc::new(TransactionOutpointInner { transaction_id, index }) } + } - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(getter, js_name = transactionId))] - pub fn get_transaction_id_as_string(&self) -> String { - self.inner().transaction_id.to_string() - } + #[wasm_bindgen(js_name = "getId")] + pub fn id_string(&self) -> String { + format!("{}-{}", self.get_transaction_id_as_string(), self.get_index()) + } - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(getter, js_name = index))] - pub fn get_index(&self) -> TransactionIndexType { - self.inner().index + #[wasm_bindgen(getter, js_name = transactionId)] + pub fn get_transaction_id_as_string(&self) -> String { + self.inner().transaction_id.to_string() + } + + #[wasm_bindgen(getter, js_name = index)] + pub fn get_index(&self) -> TransactionIndexType { + self.inner().index + } + } } } diff --git a/consensus/client/src/output.rs b/consensus/client/src/output.rs index 8f335c47d7..17b4a58c80 100644 --- a/consensus/client/src/output.rs +++ b/consensus/client/src/output.rs @@ -1,3 +1,9 @@ +//! +//! Implementation of the client-side [`TransactionOutput`] used by the [`Transaction`] struct. +//! + +#![allow(non_snake_case)] + use crate::imports::*; #[wasm_bindgen(typescript_custom_section)] @@ -28,14 +34,21 @@ export interface ITransactionOutputVerboseData { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type representing `ITransactionOutput | TransactionOutput` + /// @category Consensus #[wasm_bindgen(typescript_type = "ITransactionOutput | TransactionOutput")] pub type TransactionOutputT; + /// WASM (TypeScript) type representing `ITransactionOutput[] | TransactionOutput[]` + /// @category Consensus #[wasm_bindgen(typescript_type = "(ITransactionOutput | TransactionOutput)[]")] pub type TransactionOutputArrayAsArgT; + /// WASM (TypeScript) type representing `TransactionOutput[]` + /// @category Consensus #[wasm_bindgen(typescript_type = "TransactionOutput[]")] pub type TransactionOutputArrayAsResultT; } +/// Inner type used by [`TransactionOutput`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionOutputInner { diff --git a/consensus/client/src/result.rs b/consensus/client/src/result.rs index 4c8cb83f54..d8bff8aa10 100644 --- a/consensus/client/src/result.rs +++ b/consensus/client/src/result.rs @@ -1 +1,3 @@ +//! 
[`Result`] type alias that is bound to the [`Error`](super::error::Error) type from this crate. + pub type Result<T> = std::result::Result<T, super::error::Error>; diff --git a/consensus/client/src/serializable/mod.rs b/consensus/client/src/serializable/mod.rs index a590ab2862..ab78d956be 100644 --- a/consensus/client/src/serializable/mod.rs +++ b/consensus/client/src/serializable/mod.rs @@ -1,3 +1,24 @@ +//! +//! # Standardized JSON serialization and deserialization of Kaspa transactions. +//! +//! This module provides standardized JSON serialization and deserialization of +//! Kaspa transactions. There are two sub-modules: `numeric` and `string`. +//! +//! The `numeric` module provides serialization and deserialization of transactions +//! with all large integer values as `bigint` types in WASM or numerical values that +//! exceed the largest integer that can be represented by the JavaScript `number` type. +//! +//! The `string` module provides serialization and deserialization of transactions +//! with all large integer values as `string` types. This allows deserialization +//! via JSON in JavaScript environments and later conversion to `bigint` types. +//! +//! These data structures can be used for manual transport of transactions using JSON. +//! For more advanced use cases, please refer to `PSKT` in the [`kaspa_wallet_pskt`](https://docs.rs/kaspa_wallet_pskt) +//! crate. +//! + +#![allow(non_snake_case)] + pub mod numeric; pub mod string; @@ -80,6 +101,7 @@ export interface ISerializableTransaction { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) representation of the `ISerializableTransaction` interface. #[wasm_bindgen(extends = js_sys::Array, typescript_type = "ISerializableTransaction")] pub type SerializableTransactionT; } diff --git a/consensus/client/src/serializable/numeric.rs b/consensus/client/src/serializable/numeric.rs index 733afd54e9..6c24db634a 100644 --- a/consensus/client/src/serializable/numeric.rs +++ b/consensus/client/src/serializable/numeric.rs @@ -1,4 +1,10 @@ -//! This module implements the primitives for external transaction signing. +//! +//! This module implements transaction-related primitives for JSON serialization +//! where all large integer values (`u64`) are serialized to JSON using `serde` and +//! can exceed the largest integer value representable by the JavaScript `number` type. +//! (i.e. transactions serialized using this module cannot be deserialized in JavaScript +//! but may be deserialized in other JSON-capable environments that support large integers) +//! use crate::error::Error; use crate::imports::*; diff --git a/consensus/client/src/serializable/string.rs b/consensus/client/src/serializable/string.rs index e35cdb028b..35c7907b29 100644 --- a/consensus/client/src/serializable/string.rs +++ b/consensus/client/src/serializable/string.rs @@ -1,4 +1,7 @@ -//! This module implements the primitives for external transaction signing. +//! +//! This module implements transaction-related primitives for JSON serialization +//! where all large integer values (`u64`) are serialized to and from JSON as strings. +//! use crate::imports::*; use crate::result::Result; diff --git a/consensus/client/src/sign.rs b/consensus/client/src/sign.rs index c254aee076..4044dc5701 100644 --- a/consensus/client/src/sign.rs +++ b/consensus/client/src/sign.rs @@ -1,3 +1,7 @@ +//! +//! Utilities for signing transactions. +//!
+ use crate::transaction::Transaction; use core::iter::once; use itertools::Itertools; diff --git a/consensus/client/src/transaction.rs b/consensus/client/src/transaction.rs index fb6d185f0e..17cc381265 100644 --- a/consensus/client/src/transaction.rs +++ b/consensus/client/src/transaction.rs @@ -1,3 +1,7 @@ +//! +//! Declares the client-side [`Transaction`] type, which represents a Kaspa transaction. +//! + #![allow(non_snake_case)] use crate::imports::*; @@ -53,10 +57,13 @@ export interface ITransactionVerboseData { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type representing `ITransaction | Transaction` + /// @category Consensus #[wasm_bindgen(typescript_type = "ITransaction | Transaction")] pub type TransactionT; } +/// Inner type used by [`Transaction`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionInner { diff --git a/consensus/client/src/utils.rs b/consensus/client/src/utils.rs index 4f543d45bc..7e08556fec 100644 --- a/consensus/client/src/utils.rs +++ b/consensus/client/src/utils.rs @@ -1,3 +1,9 @@ +//! +//! Client-side utility functions and their WASM bindings. +//! + +#![allow(non_snake_case)] + use crate::imports::*; use crate::result::Result; use kaspa_addresses::*; diff --git a/consensus/client/src/utxo.rs b/consensus/client/src/utxo.rs index 0a8b3fdb94..bbfc1199d1 100644 --- a/consensus/client/src/utxo.rs +++ b/consensus/client/src/utxo.rs @@ -1,3 +1,13 @@ +//! +//! # UTXO client-side data structures. +//! +//! This module provides client-side data structures for UTXO management. +//! In particular, the [`UtxoEntry`] and [`UtxoEntryReference`] structs +//! are used to represent UTXO entries in the wallet subsystem and WASM bindings. +//! + +#![allow(non_snake_case)] + use crate::imports::*; use crate::outpoint::{TransactionOutpoint, TransactionOutpointInner}; use crate::result::Result; @@ -29,16 +39,22 @@ export interface IUtxoEntry { #[wasm_bindgen] extern "C" { + /// WASM type representing an array of [`UtxoEntryReference`] objects (i.e. `UtxoEntryReference[]`) #[wasm_bindgen(extends = Array, typescript_type = "UtxoEntryReference[]")] pub type UtxoEntryReferenceArrayT; + /// WASM type representing a UTXO entry interface (a UTXO-like object) #[wasm_bindgen(typescript_type = "IUtxoEntry")] pub type IUtxoEntry; + /// WASM type representing an array of UTXO entries (i.e. `IUtxoEntry[]`) #[wasm_bindgen(typescript_type = "IUtxoEntry[]")] pub type IUtxoEntryArray; } +/// A UTXO entry Id is a unique identifier for a UTXO entry defined by the `txid+output_index`. pub type UtxoEntryId = TransactionOutpointInner; +/// [`UtxoEntry`] struct represents a client-side UTXO entry. +/// /// @category Wallet SDK #[derive(Clone, Debug, Serialize, Deserialize, CastFromJs)] #[serde(rename_all = "camelCase")] @@ -119,6 +135,8 @@ impl From<&UtxoEntry> for cctx::UtxoEntry { } } +/// [`Arc`] reference to a [`UtxoEntry`] used by the wallet subsystems. +/// /// @category Wallet SDK #[derive(Clone, Debug, Serialize, Deserialize, CastFromJs)] #[wasm_bindgen(inspectable)] @@ -251,6 +269,7 @@ impl PartialOrd for UtxoEntryReference { } } +/// An extension trait to convert a JS value into a vec of UTXO entry references. 
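// [Editor's sketch: `TryIntoUtxoEntryReferences`, defined just below, is an extension
// trait -- a trait implemented on an existing type so a fallible conversion reads as a
// method call. A minimal stand-alone example of the pattern, with `Entry` and a string
// slice standing in for the crate's JS value and `UtxoEntryReference` types:]
struct Entry(u64);

trait TryIntoEntries {
    fn try_into_entries(&self) -> Result<Vec<Entry>, String>;
}

impl TryIntoEntries for [&str] {
    fn try_into_entries(&self) -> Result<Vec<Entry>, String> {
        // collect() stops at the first failed parse and returns its error
        self.iter().map(|s| s.parse::<u64>().map(Entry).map_err(|e| e.to_string())).collect()
    }
}

fn main() {
    assert_eq!(["1", "2", "3"].try_into_entries().unwrap().len(), 3);
    assert!(["1", "x"].try_into_entries().is_err());
}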
pub trait TryIntoUtxoEntryReferences { fn try_into_utxo_entry_references(&self) -> Result<Vec<UtxoEntryReference>>; } diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 91165b73db..365b8404c1 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -39,7 +39,7 @@ pub struct BlockValidationFutures { /// A future triggered when DAG state which included this block has been processed by the virtual processor /// (exceptions are header-only blocks and trusted blocks which have the future completed before virtual - /// processing along with the [`block_task`]) + /// processing along with the `block_task`) pub virtual_state_task: BlockValidationFuture, } diff --git a/consensus/core/src/config/bps.rs b/consensus/core/src/config/bps.rs index c0c52a6dfd..5e98aac5df 100644 --- a/consensus/core/src/config/bps.rs +++ b/consensus/core/src/config/bps.rs @@ -33,7 +33,7 @@ impl Bps { } /// Returns the GHOSTDAG K value which was pre-computed for this BPS - /// (see [`calculate_ghostdag_k`] and [`gen_ghostdag_table`] for the full calculation) + /// (see [`calculate_ghostdag_k`] and `gen_ghostdag_table` for the full calculation) #[rustfmt::skip] pub const fn ghostdag_k() -> KType { match BPS { diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index 46ad3f2cea..188b2403b4 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -1,3 +1,9 @@ +//! +//! # Consensus Core +//! +//! This crate implements primitives used in the Kaspa node consensus processing. +//! + extern crate alloc; extern crate core; extern crate self as consensus_core; diff --git a/consensus/core/src/network.rs b/consensus/core/src/network.rs index d5e9abd244..18e52eacbf 100644 --- a/consensus/core/src/network.rs +++ b/consensus/core/src/network.rs @@ -1,3 +1,16 @@ +//! +//! # Network Types +//! +//! This module implements [`NetworkType`] (such as `mainnet`, `testnet`, `devnet`, and `simnet`) +//! and [`NetworkId`] that combines a network type with an optional numerical suffix. +//! +//! The suffix is used to differentiate between multiple networks of the same type and is used +//! explicitly with `testnet` networks, allowing declaration of testnet versions such as +//! `testnet-10`, `testnet-11`, etc. +//! + +#![allow(non_snake_case)] + use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_addresses::Prefix; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index bad1b679a2..a4dd7dd45b 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -1,3 +1,11 @@ +//! +//! # Transaction +//! +//! This module implements the consensus [`Transaction`] structure and related types. +//! + +#![allow(non_snake_case)] + mod script_public_key; use borsh::{BorshDeserialize, BorshSerialize}; @@ -25,6 +33,7 @@ use crate::{ /// COINBASE_TRANSACTION_INDEX is the index of the coinbase transaction in every block pub const COINBASE_TRANSACTION_INDEX: usize = 0; +/// A 32-byte Kaspa transaction identifier.
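// [Editor's sketch: the network.rs doc above describes network ids as a network type
// plus an optional numeric suffix ("testnet-10", "testnet-11"). A simplified parser
// showing that scheme; `parse_network_id` is hypothetical, not the actual
// kaspa_consensus_core `NetworkId` implementation.]
fn parse_network_id(s: &str) -> Result<(String, Option<u32>), String> {
    match s.split_once('-') {
        // suffixed form, e.g. "testnet-11"
        Some((net, suffix)) => Ok((net.to_string(), Some(suffix.parse::<u32>().map_err(|e| e.to_string())?))),
        // bare form, e.g. "mainnet"
        None => Ok((s.to_string(), None)),
    }
}

fn main() {
    assert_eq!(parse_network_id("testnet-11").unwrap(), ("testnet".to_string(), Some(11)));
    assert_eq!(parse_network_id("mainnet").unwrap(), ("mainnet".to_string(), None));
}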
pub type TransactionId = kaspa_hashes::Hash; /// Holds details about an individual transaction output in a utxo diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 87beeb565d..8dfe4e7937 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -91,7 +91,7 @@ impl pub fn ghostdag(&self, parents: &[Hash]) -> GhostdagData { assert!(!parents.is_empty(), "genesis must be added via a call to init"); diff --git a/consensus/src/processes/reachability/interval.rs b/consensus/src/processes/reachability/interval.rs index 9f8d7fbd09..b910f3ddf1 100644 --- a/consensus/src/processes/reachability/interval.rs +++ b/consensus/src/processes/reachability/interval.rs @@ -89,7 +89,7 @@ impl Interval { } /// Splits this interval to exactly |sizes| parts where - /// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly + /// |part_i| = sizes\[i\]. This method expects sum(sizes) to be exactly /// equal to the interval's size. pub fn split_exact(&self, sizes: &[u64]) -> Vec<Interval> { assert_eq!(sizes.iter().sum::<u64>(), self.size(), "sum of sizes must be equal to the interval's size"); @@ -107,7 +107,7 @@ impl Interval { /// Splits this interval to |sizes| parts /// by the allocation rule described below. This method expects sum(sizes) /// to be smaller or equal to the interval's size. Every part_i is - /// allocated at least sizes[i] capacity. The remaining budget is + /// allocated at least sizes\[i\] capacity. The remaining budget is /// split by an exponentially biased rule described below. /// /// This rule follows the GHOSTDAG protocol behavior where the child diff --git a/crypto/addresses/src/lib.rs b/crypto/addresses/src/lib.rs index 8aca863866..8e3ea385a8 100644 --- a/crypto/addresses/src/lib.rs +++ b/crypto/addresses/src/lib.rs @@ -1,3 +1,11 @@ +//! +//! Kaspa [`Address`] implementation. +//! +//! In its string form, the Kaspa [`Address`] is represented by a `bech32`-encoded +//! address string combined with a network type. The `bech32` string encoding is +//! comprised of a public key, the public key version and the resulting checksum. +//! + use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use smallvec::SmallVec; @@ -11,6 +19,7 @@ use workflow_wasm::{ mod bech32; +/// Error type produced by [`Address`] operations. #[derive(Error, PartialEq, Eq, Debug, Clone)] pub enum AddressError { #[error("The address has an invalid prefix {0}")] @@ -190,7 +199,8 @@ pub const PAYLOAD_VECTOR_SIZE: usize = 36; /// Used as the underlying type for address payload, optimized for the largest version length (33). pub type PayloadVec = SmallVec<[u8; PAYLOAD_VECTOR_SIZE]>; -/// Kaspa `Address` struct that serializes to and from an address format string: `kaspa:qz0s...t8cv`. +/// Kaspa [`Address`] struct that serializes to and from an address format string: `kaspa:qz0s...t8cv`. +/// /// @category Address #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash, CastFromJs)] #[wasm_bindgen(inspectable)] @@ -516,12 +526,24 @@ impl TryCastFromJs for Address { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type representing an Address-like object: `Address | string`. + /// + /// @category Address #[wasm_bindgen(extends = js_sys::Array, typescript_type = "Address | string")] pub type AddressT; + /// WASM (TypeScript) type representing an array of Address-like objects: `(Address | string)[]`.
+ /// + /// @category Address #[wasm_bindgen(extends = js_sys::Array, typescript_type = "(Address | string)[]")] pub type AddressOrStringArrayT; + /// WASM (TypeScript) type representing an array of [`Address`] objects: `Address[]`. + /// + /// @category Address #[wasm_bindgen(extends = js_sys::Array, typescript_type = "Address[]")] pub type AddressArrayT; + /// WASM (TypeScript) type representing an [`Address`] or an undefined value: `Address | undefined`. + /// + /// @category Address #[wasm_bindgen(typescript_type = "Address | undefined")] pub type AddressOrUndefinedT; } diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index 4406bd5b6d..ad800d2488 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -2747,7 +2747,7 @@ mod test { (1u64, vec![], false), // Case 1: 0 = locktime < txLockTime (0x800000, vec![0x7f, 0, 0], false), // Case 2: 0 < locktime < txLockTime (0x800000, vec![0x7f, 0, 0, 0, 0, 0, 0, 0, 0], true), // Case 3: locktime too big - (LOCK_TIME_THRESHOLD * 2, vec![0x7f, 0, 0, 0], true), // Case 4: lock times are inconsistant + (LOCK_TIME_THRESHOLD * 2, vec![0x7f, 0, 0, 0], true), // Case 4: lock times are inconsistent ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; diff --git a/indexes/utxoindex/src/core/errors.rs b/indexes/utxoindex/src/core/errors.rs index 61aa877ab8..0e09989055 100644 --- a/indexes/utxoindex/src/core/errors.rs +++ b/indexes/utxoindex/src/core/errors.rs @@ -4,7 +4,7 @@ use thiserror::Error; use crate::IDENT; use kaspa_database::prelude::StoreError; -/// Errors originating from the [`UtxoIndex`]. +/// Errors originating from the [`UtxoIndex`](crate::UtxoIndex). #[derive(Error, Debug)] pub enum UtxoIndexError { #[error("[{IDENT}]: {0}")] @@ -14,5 +14,5 @@ DBResetError(#[from] io::Error), } -/// Results originating from the [`UtxoIndex`]. +/// Results originating from the [`UtxoIndex`](crate::UtxoIndex). pub type UtxoIndexResult<T> = Result<T, UtxoIndexError>; diff --git a/indexes/utxoindex/src/index.rs b/indexes/utxoindex/src/index.rs index b71935afa2..3b1bf2fe9d 100644 --- a/indexes/utxoindex/src/index.rs +++ b/indexes/utxoindex/src/index.rs @@ -21,7 +21,8 @@ use std::{ const RESYNC_CHUNK_SIZE: usize = 2048; //Increased from 1k (used in go-kaspad), for quicker resets, while still having a low memory footprint. -/// UtxoIndex indexes [`CompactUtxoEntryCollections`] by [`ScriptPublicKey`], commits them to its owns store, and emits changes. +/// UtxoIndex indexes `CompactUtxoEntryCollections` by [`ScriptPublicKey`](kaspa_consensus_core::tx::ScriptPublicKey), +/// commits them to its own store, and emits changes. /// Note: The UtxoIndex struct by itself is not thread save, only correct usage of the supplied RwLock via `new` makes it so. /// please follow guidelines found in the comments under `utxoindex::core::api::UtxoIndexApi` for proper thread safety. pub struct UtxoIndex { @@ -131,7 +132,7 @@ impl UtxoIndexApi for UtxoIndex { /// Deletes and reinstates the utxoindex database, syncing it from scratch via the consensus database. /// /// **Notes:** - /// 1) There is an implicit expectation that the consensus store must have [VirtualParent] tips. i.e. consensus database must be initiated. + /// 1) There is an implicit expectation that the consensus store must have VirtualParent tips. i.e. consensus database must be initiated. /// 2) resyncing while consensus notifies of utxo differences, may result in a corrupted db.
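// [Editor's sketch: the resync documented above rebuilds the index from scratch, and
// the RESYNC_CHUNK_SIZE constant above caps how many entries are pulled from consensus
// and committed at a time, keeping peak memory low. A schematic of that chunked loop
// with mock types; not the actual UtxoIndex::resync body.]
const CHUNK_SIZE: usize = 2048;

fn resync(source: &[(u64, u64)], index: &mut Vec<(u64, u64)>) {
    index.clear(); // "deletes and reinstates": start from an empty index
    for chunk in source.chunks(CHUNK_SIZE) {
        index.extend_from_slice(chunk); // each chunk is committed independently
    }
}

fn main() {
    let source: Vec<(u64, u64)> = (0..5000u64).map(|i| (i, i * 100)).collect();
    let mut index = Vec::new();
    resync(&source, &mut index);
    assert_eq!(index.len(), source.len());
}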
fn resync(&mut self) -> UtxoIndexResult<()> { info!("Resyncing the utxoindex..."); diff --git a/indexes/utxoindex/src/update_container.rs b/indexes/utxoindex/src/update_container.rs index 8555a02d41..96449dbffe 100644 --- a/indexes/utxoindex/src/update_container.rs +++ b/indexes/utxoindex/src/update_container.rs @@ -25,7 +25,7 @@ impl UtxoIndexChanges { } } - /// Add a [`UtxoDiff`] the the [`UtxoIndexChanges`] struct. + /// Add a [`UtxoDiff`] the [`UtxoIndexChanges`] struct. pub fn update_utxo_diff(&mut self, utxo_diff: UtxoDiff) { let (to_add, mut to_remove) = (utxo_diff.add, utxo_diff.remove); @@ -53,7 +53,7 @@ impl UtxoIndexChanges { } } - /// Add a [`Vec<(TransactionOutpoint, UtxoEntry)>`] the the [`UtxoIndexChanges`] struct + /// Add a [`Vec<(TransactionOutpoint, UtxoEntry)>`] the [`UtxoIndexChanges`] struct /// /// Note: This is meant to be used when resyncing. pub fn add_utxos_from_vector(&mut self, utxo_vector: Vec<(TransactionOutpoint, UtxoEntry)>) { diff --git a/metrics/core/src/data.rs b/metrics/core/src/data.rs index d47941aa7c..ce9dc72161 100644 --- a/metrics/core/src/data.rs +++ b/metrics/core/src/data.rs @@ -252,65 +252,6 @@ pub enum Metric { } impl Metric { - // TODO - this will be refactored at a later date - // as this requires changes and testing in /kos - // pub fn group(&self) -> &'static str { - // match self { - // Metric::NodeCpuUsage - // | Metric::NodeResidentSetSizeBytes - // | Metric::NodeVirtualMemorySizeBytes - // | Metric::NodeFileHandlesCount - // | Metric::NodeDiskIoReadBytes - // | Metric::NodeDiskIoWriteBytes - // | Metric::NodeDiskIoReadPerSec - // | Metric::NodeDiskIoWritePerSec - // | Metric::NodeBorshLiveConnections - // | Metric::NodeBorshConnectionAttempts - // | Metric::NodeBorshHandshakeFailures - // | Metric::NodeJsonLiveConnections - // | Metric::NodeJsonConnectionAttempts - // | Metric::NodeJsonHandshakeFailures - // | Metric::NodeBorshBytesTx - // | Metric::NodeBorshBytesRx - // | Metric::NodeJsonBytesTx - // | Metric::NodeJsonBytesRx - // | Metric::NodeP2pBytesTx - // | Metric::NodeP2pBytesRx - // | Metric::NodeGrpcUserBytesTx - // | Metric::NodeGrpcUserBytesRx - // | Metric::NodeTotalBytesTx - // | Metric::NodeTotalBytesRx - // | Metric::NodeBorshBytesTxPerSecond - // | Metric::NodeBorshBytesRxPerSecond - // | Metric::NodeJsonBytesTxPerSecond - // | Metric::NodeJsonBytesRxPerSecond - // | Metric::NodeP2pBytesTxPerSecond - // | Metric::NodeP2pBytesRxPerSecond - // | Metric::NodeGrpcUserBytesTxPerSecond - // | Metric::NodeGrpcUserBytesRxPerSecond - // | Metric::NodeTotalBytesTxPerSecond - // | Metric::NodeTotalBytesRxPerSecond - // | Metric::NodeActivePeers => "system", - // // -- - // Metric::NodeBlocksSubmittedCount - // | Metric::NodeHeadersProcessedCount - // | Metric::NodeDependenciesProcessedCount - // | Metric::NodeBodiesProcessedCount - // | Metric::NodeTransactionsProcessedCount - // | Metric::NodeChainBlocksProcessedCount - // | Metric::NodeMassProcessedCount - // | Metric::NodeDatabaseBlocksCount - // | Metric::NodeDatabaseHeadersCount - // | Metric::NetworkMempoolSize - // | Metric::NetworkTransactionsPerSecond - // | Metric::NetworkTipHashesCount - // | Metric::NetworkDifficulty - // | Metric::NetworkPastMedianTime - // | Metric::NetworkVirtualParentHashesCount - // | Metric::NetworkVirtualDaaScore => "kaspa", - // } - // } - pub fn is_key_performance_metric(&self) -> bool { matches!( self, diff --git a/mining/src/feerate/fee_estimation.ipynb b/mining/src/feerate/fee_estimation.ipynb index 694f47450c..a8b8fbfc89 100644 --- 
a/mining/src/feerate/fee_estimation.ipynb +++ b/mining/src/feerate/fee_estimation.ipynb @@ -252,7 +252,7 @@ and @@ -303,7 +303,7 @@ [two hunks over the notebook's cell outputs: each replaces an embedded base64-encoded "image/png" plot payload (and its adjacent "text/plain" figure repr), the old and new base64 strings apparently differing only in the dropped trailing "\n" escape; the multi-kilobyte blobs carry no readable content and are elided here]
wFFfSzykuYW1OmcXoRKSgFFfRA5gvZnZp5IyKFo/CCvrmS9s5+9vUMhF2KiMgZUXBBv2huFYB69SJSMAou6C9srKQoZjy/4/WwSxEROSMKLuhLE3EubKygdZuCXkQKQ8EFPcBlLTW80NbJYCpvruUSEcmZwgz6+TUMpdK8pPveiEgBKMygb6kBYNXWid5mX0Rk+irIoK+ZUcy59eWs3qagF5HoK8igh0yvfs221xlJj3u7fBGRyCjYoF86v5qewRQv79GDSEQk2go26EfH6VdrnF5EIq5gg765uoymqiTPvHYg7FJERHKqYIMe4K3n1vKnVw9onF5EIq2gg37Zglp6BlKsbdd8ehGJrsIO+nMyT5l6enNHyJWIiOROQQf9rPISLphdwdNb9oddiohIzhR00AP8xYJantveSd9QKuxSRERyouCDftm5tQyNpHU7BBGJrIIP+staaiiOx3h6s4ZvRCSaCj7ok8VxLj+7hic37Qu7FBGRnCj4oAe46rx6Xu04xLb9h8IuRUQk6xT0wNXnNQDwxMvq1YtI9CjogbmzylhQX84TG/eGXYqISNYp6ANXn9/Aqq0H6R4YDrsUEZGsUtAH3nF+Pam089QrukpWRKJFQR9YNLea6rIET2zUOL2IRMspg97M7jWzfWa2bkxbjZk9bmabg2V10G5m9o9mtsXMXjKzxbksPpviMeOq8xr4zca9DKZGwi5HRCRrJtKj/z7w7mPavgQ84e4LgCeCbYBrgAXBzwrgzuyUeWa89+LZ9Ayk+KPufSMiEXLKoHf3p4Bj7w9wHXBfsH4fcP2Y9vs9489AlZnNzlaxubbs3FoqSov4xUu7wy5FRCRrJjtG3+DuuwGCZX3Q3gTsHLNfW9B2HDNbYWatZtba0ZEfX4AWF8VYfuFZPL5ewzciEh3Z/jLWxmkb9/FN7n6Xuy9x9yV1dXVZLmPyrr14Nj2DKf7wioZvRCQaiiZ53F4zm+3uu4OhmdGpKm3AnDH7NQO7plLgmbbsnFqSiRife+B5BoZHaKxKcsvyhVy/aNw/TERE8t5ke/SPAjcF6zcBPxvT/vFg9s0VQNfoEM908dja3QyNOP3DIzjQ3tnPrQ+v5ZHn28MuTURkUiYyvfIB4BlgoZm1mdkngduAd5rZZuCdwTbAY8BrwBbgbuDvclJ1Dt2+ctNxDwvvHx7h9pWbQqpIRGRqTjl04+4fPsGvrh5nXwdunmpRYdrV2X9a7SIi+U5Xxh6jsSp5Wu0iIvlOQX+MW5YvJJmIH9WWTMS5ZfnCkCoSEZmayc66iazR2TW3r9xEe2c/8ZjxP254o2bdiMi0pR79OK5f1MQfv3QVd3zoEkbSTkNladgliYhMmoL+JN5z8WyqyhL832e2h12KiMikKehPojQR598vmcOvN+xld5dm3YjI9KSgP4WPXjGPtDsPPLsj7FJERCZFQX8Kc2rKuGphPf+6aidDqXTY5YiInDYF/QR87M3z2N87yL+tnVa37RERART0E3LlgjoW1Jfzz79/jczFvyIi04eCfgJiMeNv//IcXt7Tw+825ce980VEJkpBP0Hvu7SRxspS7vz9q2GXIiJyWhT0E5SIx/jUX5zNqq0HWbP99bDLERGZMAX9abhx6RyqyhJ857ebwy5FRGTCFPSnoay4iBVXns2TmzpYs/3Y56WLiOQnBf1p+g9vaaG2vIT/9atNmoEjItOCgv40lRUX8dm3n8OzWw/y9BY9QFxE8p+CfhI+fPlcmqqSfHPlJtJp9epFJL8p6CehpCjOF96xgBfbuvjZi3pouIjkNwX9JH1gcTMXN1dy2y9f5tBgKuxyREROSEE/SbGY8dV/dyF7uwf5p99tCbscEZETUtBPwZvmVXPDoibufmor2w8cCrscEZFxKein6EvXnEdxUYy//+k6TbcUkbykoJ+ihopS/us15/H0lv38aE1b2OWIiBxHQZ8Ff710Lktbavjvv9jAvu6BsMsRETmKgj4LYjHjtg9cxEAqzVce0RCOiOQXBX2WnF1Xzn9+1xv49Ya9PLh6Z9jliIgcpqDPok+99Wzeem4tX//5erbs6wm7HBERQEGfVbGY8a2/uoSy4iI+98ALDAyPhF2SiMjUgt7MtpnZWjN7wcxag7YaM3vczDYHy+rslDo91FeU8s0PXczG3d18/efrwy5HRCQrPfq3u/ul7r4k2P4S8IS7LwCeCLYLylXnNfB3bzuHB1bt5IfPbg+7HBEpcLkYurkOuC9Yvw+4Pgfvkfe++K6FvH1hHV97dD2rt+khJSISnqkGvQO/NrM1ZrYiaGtw990AwbJ+iu8xLcVjxj/cuIjm6jI+84M17DjQF3ZJIlKgphr0y9x9MXANcLOZXTnRA81shZm1mllrR0fHFMvIT5XJBHd/fAmptPPxe59lf+9g2CWJSAGaUtC7+65guQ/4KbAU2GtmswGC5b4THHuXuy9x9yV1dXVTKSOvnVtfzj03Xcae7gH+4/dX65bGInLGTTrozWyGmc0cXQfeBawDHgVuCna7CfjZVIuc7t40r5rvfmQx63d18zf3t9I/pGmXInLmTKVH3wA8bWYvAquAf3P3XwG3Ae80s83AO4Ptgnf1+Q3c/sGLeea1A3zyvtUKexE5Y4ome6C7vwZcMk77AeDqqRQVVe9f3AzAF3/0Ip+8bzX33HQZyeJ4yFWJSNTpytgz7P2Lm/nWX13Cn187wEfveZbXDw2FXZKIRJyCPgQ3LGrmOx9ZzNr2Lj7wf/7EzoOaeikiuTPpoRuZmmsvmk1teQmfum8177/zT3z8ink8uHonuzr7aaxKcsvyhVy/qCnsMkUkAtSjD9HS+TX85DNvYTiV5o7HX6G9sx8H2jv7ufXhtTzyfHvYJYpIBCjoQ7agYSal43wh2z88wu0rN4VQkYhEjYI+D+ztGv/xg7s6+89wJSISRQr6PNBYlRy3vTKZ0GMJRWTKFPR54JblC0kmjh6+iRl09g/zN/e3srtLPXsRmTwFfR64flET//P9F9FUlcSApqokd3zwEr7ynvN5est+3nHH7/mXP25lJK3evYicPsuHoYElS5Z4a2tr2GXkpZ0H+/jKI+v4/SsdXNRUydfedwFvmlcTdlkikgfMbM2Yhz6dkHr0eW5OTRnf/8RlfOcji9jbPcAH7nyGz/xgDdv2Hwq7NBGZJnTB1DRgZrz34kauOq+eu5/ayj8/9Sq/2biXv758Hn/7l+dwVmVp2CWKSB7T0M00tK97gG//5hUeam0jbsaHljTzmbedQ3N1WdilicgZNNGhGwX9NLbjQB93/v5VfrxmJ+5w3aVNfGJZC29sqgy7NBE5AxT0BWRXZz93PfUa/2/1TvqHR1gyr5qb3tLCu994Fom4voYRiSoFfQHq6h/mR607uf+Z7ew42Ef9zBJuWNzEBxc3s6BhZtjliUiWKegL2Eja+d2mfTywagdPbupgJO1cMqeKDy5u4j0XN1IzozjsEkUkCxT0AkBHzyA/e6GdH69p4+U9PcQMLp8/i2suOovlF55FQ4Vm7IhMVwp6OYq7s2F3N79cu4dfrtvNqx2Zefhvm
lfNVefVc+WCOi5srCAWs5ArFZGJUtDLSW3e28Ov1u1h5YY9rGvvBmDWjGL+YkEtV76hjmXn1qq3L5LnFPQyYR09g/xhcwdPvdLBU5v3czB4ju3cmjKWzq9haUsNl82voWVWGWbq8YvkCwW9TEo67azf1c2zWw+wautBWre/fjj4a8tLuHROFRc3V3JRcyUXNVVSW14ScsUihUtBL1nh7rza0cuqra/Tuu0gL7Z18tr+Q4z+Z9NYWcpFzZVc2FjJGxpmsvCsmcytKSOusX6RnJto0OteN3JSZsa59TM5t34mH7l8LgA9A8Os39XNuvYuXmrr4qW2Tlau33v4mJKiGOfUlbPwrJksaChnQf1M5teW0VxdRmni+McmikhuKejltM0sTXDF2bO44uxZh9sODabYsq+XTXt72Ly3h017e3nm1QP8dMwDzs2gsTLJvFllzJs1g5ZgObemjKaqJBXJIn0HIJIDCnrJihklRVwyp4pL5lQd1d7VP8yrHb3sONDHtgOH2B4sV67fc3js//BrFMeZXZWksSpJY2UpjVVJZleW0lSV5KzKUupmllBeopOByOlS0EtOVSYTLJ5bzeK51cf9rqt/mB0H+thxsI/dXf20d/azu3OAXV39bNjVzf7eweOOKU3EqC0voW5mCXXB8vB2sF5dlqC6rJiKZELfFYigoJcQVSYTmdk7zePfbXNgeIQ9XZng39M1wP7eQTp6BtnfO0RHzyDbD/QdNSvoWGaZ96guKw6WmfWqsmKqyxJUzSimKpmgvLSIitIiZpYmKC8pYmZpETOKi3TxmESGgl7yVmkiTkvtDFpqZ5x0v+GRNAcPZcK/o3eQzr4hXj80nFn2DfN63xBd/cN09A7yyt5eOvuGODQ0ctLXNIPy4iLKSzPBP/YkMHoiKCuOkzy8jFMW/CQTRUfWi+OUBfuUFMU07CShyFnQm9m7gf8NxIHvufttuXovKWyJeIyGitLTupJ3MDVCV98wXf3DdA+k6B1M0TMwTO9Aip6BzHrPYGa9dyBFz2DmhLHzYB/dAykODaboHz75yeJYMYNk4sjJoTQRo6QocwIoGbteFKM0Mdp+pK2kKB7sd2Tf0f2Ki2Ik4jGK4kZxfPz1RLCu4azwPPJ8O7ev3MSuzn4aq5Lcsnwh1y9qyvn75iTozSwOfBd4J9AGrDazR919Qy7eT+R0lRTFqa+IUz+F2zyk085AaoS+oRH6hzLLvqHUkfXhEfqHUkH7kX36hzNtQ6k0A8MjDKbSDAyn6eofZnA4zWAqzWAq0z44nGYgNUI2L3eJGYdDPzHmBDC6XhSPURw3io75fVHMKIob8VhmPWZGUcyIx4NlzIjb2O3YkfbYkX2KYkYsNv4+R+8XIxaDoljm5BQzgqVhY9ZjRrDMHGfB9lHrwe8txpH1Y14j139tPfJ8O7c+vPZwB6G9s59bH14LkPOwz1WPfimwxd1fAzCzB4HrAAW9REYsZsGwTG5HQN2dVNoPnxQyJ4Dj11PpNEMpZ3gksz6ccoZG0qRG0gyPOMNB2/BI+qj1Y48bSnnm+JHMPr2pFEOpNCNpP/yTOrxMM5KGkXT6cNvYfaYTO3zCOHLiiFnm3/PoiWP0hBAfc3IYbY+ZBfuCceQkE4tltl/e083wyNH/TPqHR7h95aZpG/RNwM4x223A5Tl6L5FIM7PDPevp9PgYdyftkEqnSacJTgo+7gnh2BPF4RPJiJP2zEnIHdLBa46kHXdnJNh2z+yfHt1n7Pp422Ne47j18Y457vUy6yPuh+vKvP+R7cxy9J+DHxfyo3Z19uf830Wugn68v4GO+pRmtgJYATB37twclSEiYcn0fCEeG70aurCvil52229pHyfUG6uSOX/vXD1QtA2YM2a7Gdg1dgd3v8vdl7j7krq6uhyVISKSH25ZvpDkMbcASSbi3LJ8Yc7fO1c9+tXAAjObD7QDNwIfydF7iYjkvdFx+MjMunH3lJl9FlhJ5u+1e919fS7eS0Rkurh+UdMZCfZj5Wy6gLs/BjyWq9cXEZGJydUYvYiI5AkFvYhIxCnoRUQiTkEvIhJxCnoRkYhT0IuIRJyCXkQk4syzef/TyRZh1gFsn+ThtcD+LJaTj6L+GfX5pjd9vvDMc/dT3kMmL4J+Ksys1d2XhF1HLkX9M+rzTW/6fPlPQzciIhGnoBcRibgoBP1dYRdwBkT9M+rzTW/6fHlu2o/Ri4jIyUWhRy8iIicxbYPezO41s31mti7sWnLBzOaY2ZNmttHM1pvZ58OuKZvMrNTMVpnZi8Hn+3rYNeWCmcXN7Hkz+0XYteSCmW0zs7Vm9oKZtYZdT7aZWZWZ/djMXg7+X3xz2DVNxrQdujGzK4Fe4H53f2PY9WSbmc0GZrv7c2Y2E1gDXO/uG0IuLSvMzIAZ7t5rZgngaeDz7v7nkEvLKjP7T8ASoMLd3xt2PdlmZtuAJe6er/PMp8TM7gP+4O7fM7NioMzdO8Ou63RN2x69uz8FHAy7jlxx993u/lyw3gNsBM78o2lyxDN6g81E8DM9ex0nYGbNwHuA74Vdi5w+M6sArgTuAXD3oekY8jCNg76QmFkLsAh4NtxKsisY1ngB2Ac87u6R+nzAPwD/BUiHXUgOOfBrM1tjZivCLibLzgY6gH8Jht++Z2Yzwi5qMhT0ec7MyoGfAF9w9+6w68kmdx9x90uBZmCpmUVmCM7M3gvsc/c1YdeSY8vcfTFwDXBzMKQaFUXAYuBOd18EHAK+FG5Jk6Ogz2PB2PVPgB+6+8Nh15MrwZ/DvwPeHXIp2bQMeF8whv0gcJWZ/SDckrLP3XcFy33AT4Gl4VaUVW1A25i/NH9MJvinHQV9ngq+rLwH2Oju3wq7nmwzszozqwrWk8A7gJfDrSp73P1Wd2929xbgRuC37v7RkMvKKjObEUwUIBjSeBcQmVlw7r4H2GlmC4Omq4FpORmiKOwCJsvMHgDeBtSaWRvwVXe/J9yqsmoZ8DFgbTCODfBld38sxJqyaTZwn5nFyXQ4HnL3SE5BjLAG4KeZPglFwL+6+6/CLSnrPgf8MJhx8xrwiZDrmZRpO71SREQmRkM3IiIRp6AXEYk4Bb2ISMQp6EVEIk5BLyIScQp6EZGIU9CLiEScgl5EJOL+P7IeihVMYkBBAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -355,7 +355,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAG1xJREFUeJzt3X1wHPWd5/H3t+dJD7bkJ/lRfiIxEEwIJF7wHrm7HA6PSWGqNslxt0tce9RRdWGX7N3ebUJSBXck2SN3uZBNEXJFYgdnNxXiImQhVLjgNbAbskAwT+bBGBubYPlRfpJlW5Ilzff+6JY8lmakkZGmx9OfV5Vqun/9m57vyJY++vWve9rcHRERSZ4g7gJERCQeCgARkYRSAIiIJJQCQEQkoRQAIiIJpQAQEUkoBYCISEIpAEREEkoBICKSUOm4CxjJjBkzfNGiRXGXISJyVnnxxRcPuHvLaP2qOgAWLVrExo0b4y5DROSsYma/L6efDgGJiCSUAkBEJKEUACIiCaUAEBFJKAWAiEhCKQBERBJKASAiklA1GQB7Orr49hNb2N5+LO5SRESqVk0GwIHOk3z3yW2803487lJERKpWTQZAXSZ8W929/TFXIiJSvWo0AFIAdCkARERKqskAqM+GAaARgIhIaTUZAAMjAAWAiEhptRkA6fBtdZ3Mx1yJiEj1qskASKcCMinTHICIyAhqMgAAcumUDgGJiIyg7AAws5SZvWxmj0Xri83seTPbamY/M7Ns1J6L1rdF2xcV7OP2qH2LmV093m+mUC4dKABEREYwlhHAF4HNBevfBO5x9yXAYeDmqP1m4LC7fxC4J+qHmV0A3AgsBa4B7jOz1PsrvzQFgIjIyMoKADNrBT4F/DBaN+AK4KGoy1rghmh5ZbROtH1F1H8l8KC797j7DmAbcOl4vIlisulAcwAiIiModwTwHeCvgIHTaqYDR9y9L1pvA+ZFy/OAnQDR9o6o/2B7keeMuzAAdBaQiEgpowaAmX0a2O/uLxY2F+nqo2wb6TmFr3eLmW00s43t7e2jlVdSNqVDQCIiIylnBHA5cL2ZvQs8SHjo5zvAFDNLR31agd3RchswHyDa3gwcKmwv8pxB7n6/uy9z92UtLS1jfkMDMumArpMKABGRUkYNAHe/3d1b3X0R4STuk+7+x8BTwGeibquAR6LlR6N1ou1PurtH7TdGZwktBpYAvxu3dzJETnMAIiIjSo/epaQvAQ+a2deBl4HVUftq4G/NbBvhX/43Arj7G2a2DngT6ANudfcJ+w2tQ0AiIiMbUwC4+9PA09HydoqcxePu3cBnSzz/G8A3xlrkmcjpEJCIyIhq9krgrK4EFhEZUQ0HQEB3n04DFREppWYDIJcO6M87vf0KARGRYmo6AEB3BRMRKaVmAyCb1n2BRURGUrMBMDAC6NZNYUREiqrZAMimdWN4EZGR1HAA6BCQiMhIajYANAksIjIyBYCISELVbABkU+Fb61EAiIgUVbMBoBGAiMjIajYABiaBT+gD4UREiqrZAKjLRKeBKgBERIqq+QA43qMAEBEppmYDIBUY6cA40ds3emcRkQSq2QCAcB7ghEYAIiJF1XYApAKOn9QIQESkmJoOgExKIwARkVJqOgDSKdMIQESkhJoPAF0HICJSXE0HQCYION6jEYCISDG1HQBpBYCISCm1HQAp47gOAYmIFFXTAZBNBZzQJLCISFE1HQCZVEB3b5583uMuRUSk6tR8AIA+ElpEpJgaDwAD0LUAIiJF1HQADNwVTFcDi4gMV9MBkIluCqMRgIjIcLUdACndFUxEpJQaD4BoDkAXg4mIDFPjAaARgIhIKTUdAAOTwBoBiIgMV9MBoBGAiEhpNR4Aug5ARKSUmg6AVGAEpkNAIiLFjBoAZlZnZr8zs1fN7A0z+x9R+2Ize97MtprZz8wsG7XnovVt0fZFBfu6PWrfYmZXT9SbKng9cukUx7oVACIiQ5UzAugBrnD3jwAXA9eY2XLgm8A97r4EOAzcHPW/GTjs7h8E7on6YWYXADcCS4FrgPvMLDWeb6aYXDqgUwEgIjLMqAHgoWPRaib6cuAK4KGofS1wQ7S8Mlon2r7CzCxqf9Dde9x9B7ANuHRc3sUIMumATh0CEhEZpqw5ADNLmdkrwH5gPfAOcMTdB36ztgHzouV5wE6AaHsHML2wvchzJkw2FdDZ3TvRLyMictYpKwDcvd/dLwZaCf9q/1CxbtGjldhWqv00ZnaLmW00s43t7e3llDeibDrgaJdGACIiQ43pLCB3PwI8DSwHpphZOtrUCuyOltuA+QDR9mbgUGF7kecUvsb97r7M3Ze1tLSMpbyisqmAzh6NAEREhirnLKAWM5sSLdcDnwQ2A08Bn4m6rQIeiZYfjdaJtj/p7h613xidJbQYWAL8brzeSCnZdKCzgEREikiP3oU5wNrojJ0AWOfuj5nZm8CDZvZ14GVgddR/NfC3ZraN8C//GwHc/Q0zWwe8CfQBt7r7hF+iO3AWkLsTzkWLiAiUEQDuvgm4pEj7doqcxePu3cBnS+zrG8A3xl7mmcumA/ryTk9fnrrMhJ91KiJy1qjpK4EhDABA1wKIiAxR8wGQGwwATQSLiBSq+QD4w+NP8kz2NhZ/rxXuuRA2rYu7JBGRqlDOJPBZ67z9j/PJfd8iG/SEDR074Ze3hcsXfS6+wkREqkBNjwA+/t59ZL3n9MbeLthwVzwFiYhUkZoOgMk9+4pv6GirbCEiIlWopgOgMzer+Ibm1soWIiJShWo6AJ5Z8AV6g7rTGzP1sOKOeAoSEakiNT0JvGXmtQB85O3vMtcOYs2t4S9/TQCLiNR2AEAYAl/aej7XXTiHb37morjLERGpGjV9CGiAPhFURGS4RARALh3Q0aUAEBEplIwAyKQ4fFwBICJSKBEBUJcOOHLiZNxliIhUlUQEQC6T0iEgEZEhEhEAdZmA4yf76e3Px12KiEjVSEYApMMbwWgUICJySjICILoT2JETCgARkQEJCYDwbXZ0aSJYRGRAIgIgpxGAiMgwiQiAuui2kAoAEZFTkhEAAyMATQKLiAxKRADk0gEGdOhiMBGRQYkIADOjLpPSCEBEpEAiAgCgPpPSHICISIHEBEAuE2gEICJSIDEBkE0FHDmuOQARkQGJCYBcJuCQJoFFRAYlJgAasmkOawQgIjIoMQFQn01x/GQ/3b39cZciIlIVEhMADdHFYAc1ChARAZIUANkoAI71xFyJiEh1SEwA1Gc1AhARKZScABg4BHRMASAiAgkKgIZsGtAhIBGRAYkJgEzKSAemQ0AiIpHEBICZ0ZhLc0AjABERoIwAMLP5ZvaUmW02szfM7ItR+zQzW29mW6PHqVG7mdl3zWybmW0ys48W7GtV1H+rma2auLdVXH0mpTkAEZFIOSOAPuAv3f1DwHLgVjO7APgysMHdlwAbonWAa4El0dctwPchDAzgTuAy4FLgzo
HQqJS6TKARgIhIZNQAcPc97v5StNwJbAbmASuBtVG3tcAN0fJK4Mceeg6YYmZzgKuB9e5+yN0PA+uBa8b13YyiPptSAIiIRMY0B2Bmi4BLgOeBWe6+B8KQAGZG3eYBOwue1ha1lWof+hq3mNlGM9vY3t4+lvJG1ZBJc+j4Sdx9XPcrInI2KjsAzGwS8HPgL9z96Ehdi7T5CO2nN7jf7+7L3H1ZS0tLueWVpT6borff6ezpG9f9ioicjcoKADPLEP7y/4m7Pxw174sO7RA97o/a24D5BU9vBXaP0F4xjbnwYrD9R7sr+bIiIlWpnLOADFgNbHb3bxdsehQYOJNnFfBIQfvno7OBlgMd0SGiXwNXmdnUaPL3qqitYiblwovB9h3VPICISLqMPpcDNwGvmdkrUdtXgLuBdWZ2M/Ae8Nlo26+A64BtwAngTwHc/ZCZfQ14Iep3l7sfGpd3UabGKAD2dmgEICIyagC4+zMUP34PsKJIfwduLbGvNcCasRQ4ngZHAJ0KABGRxFwJDJBJBdRlAvZpBCAikqwAgHAUoDkAEZEEBkBDNs1enQUkIpK8AGjMpRQAIiIkMQCyado7e8jndTWwiCRb4gJgUi5Nf951XwARSbzkBUDdwMVgOgwkIsmWuABozOpiMBERSGAATI5GALuOdMVciYhIvBIXAA3ZFOnAaDt8Iu5SRERilbgAMDOa6zO0HdYIQESSLXEBAOFE8E6NAEQk4RIZAE11GdoOaQQgIsmW0ABIc6Srl2O6M5iIJFgiA2BF7z/yTPY2Gv/nDLjnQti0Lu6SREQqrpwbwtSU8/Y/zifb/w/ZIPpE0I6d8MvbwuWLPhdfYSIiFZa4EcDH37uPrA/5OOjeLthwVzwFiYjEJHEBMLlnX/ENHW2VLUREJGaJC4DO3KziG5pbK1uIiEjMEhcAzyz4Ar1B3emNmXpYcUc8BYmIxCRxk8BbZl4LwLJ37mVGfzs2ZR624k5NAItI4iQuACAMgV/0Xc76zft46qZPsHhGY9wliYhUXOIOAQ2Y0pABYMeBYzFXIiISj8QGwNTGLADb24/HXImISDwSGwD1mRT1mRTbDygARCSZEhsAEB4G2qERgIgkVOIDYOv+zrjLEBGJRaIDYMakHAeOneTgsZ7RO4uI1JjEBwDAlr0aBYhI8iQ8AMIzgTYrAEQkgRIdAA3ZNJNyad7aczTuUkREKi7RAQAwrTHLZgWAiCRQ4gNgxqQsW/cfo68/H3cpIiIVpQCYlKOnL8+7B3U9gIgkS+IDoGVyeCbQpraOmCsREamsxAfAtMYs2XTAKzuPxF2KiEhFJT4AAjNmTc7x8nsKABFJllEDwMzWmNl+M3u9oG2ama03s63R49So3czsu2a2zcw2mdlHC56zKuq/1cxWTczbOTMzm+rYvOco3b39cZciIlIx5YwAHgCuGdL2ZWCDuy8BNkTrANcCS6KvW4DvQxgYwJ3AZcClwJ0DoVENZjfV0Zd33tit00FFJDlGDQB3/yfg0JDmlcDaaHktcENB+4899BwwxczmAFcD6939kLsfBtYzPFRiM7s5vEfwq5oHEJEEOdM5gFnuvgcgepwZtc8Ddhb0a4vaSrUPY2a3mNlGM9vY3t5+huWNzaRcmqa6NC/+/nBFXk9EpBqM9ySwFWnzEdqHN7rf7+7L3H1ZS0vLuBY3krlT6vnndw7gXrQsEZGac6YBsC86tEP0uD9qbwPmF/RrBXaP0F41WqfWc/hEL2/v0z2CRSQZzjQAHgUGzuRZBTxS0P756Gyg5UBHdIjo18BVZjY1mvy9KmqrGvOnNgDw7DsHYq5ERKQyyjkN9KfAs8B5ZtZmZjcDdwNXmtlW4MpoHeBXwHZgG/AD4AsA7n4I+BrwQvR1V9RWNZrqMzTXZ3h2+8G4SxERqYj0aB3c/d+V2LSiSF8Hbi2xnzXAmjFVV2HzptTz7DsH6c87qaDYtIWISO1I/JXAhRZOb+Bodx8vvaezgUSk9ikACiyc3kBg8A+b98VdiojIhFMAFMilU7RObeAf3lQAiEjtUwAMsXhGI++0H+fdA7o/gIjUNgXAEItnNALw6zf2xlyJiMjEUgAM0VyfYXZTHX//yq64SxERmVAKgCLOmz2ZzXs62bqvM+5SREQmjAKgiCUzJxEYGgWISE1TABTRmEszf1oDD7+0i/68PhxORGqTAqCEpXOa2NPRzVNv7R+9s4jIWUgBUMI5LZOYXJdm7bPvxl2KiMiEUACUkAqMC+c285utB9jero+IFpHaowAYwdK5TaQC4we/2R53KSIi404BMILGXJqlc5pYt7GNtsMn4i5HRGRcKQBGsWzRVADue/qdmCsRERlfCoBRTK7LcMGcJta9sJMd+nwgEakhCoAyXLZ4GqnA+Npjb8ZdiojIuFEAlKExl+YPFk3jybf28/QWXRcgIrVBAVCmi+dPYVpjlq/+4nU6u3vjLkdE5H1TAJQpFRgrzp/J7o4uvv7Y5rjLERF53xQAYzB3Sj0fWzCVn23cyeOv7Ym7HBGR90UBMEbLz5nOnOY6/su6V9myVx8XLSJnLwXAGKUC47oL55AKjP/4440cONYTd0kiImdEAXAGJtWlue7Ds9nT0cVNq5+no0uTwiJy9lEAnKE5zfV86sNzeHvfMT6/5nmOnDgZd0kiImOiAHgfFk5v5NoLZ/P6rqP80ff/mV1HuuIuSUSkbAqA9+kDLZO44eK57Drcxcp7n+G57QfjLklEpCwKgHHQOrWBz3yslbzDv//Bc9z75Fb6+vNxlyUiMiIFwDiZPinHv102nyUzJ/GtJ97mhu/9ltd3dcRdlohISQqAcZRNB1y9dDbXXTibHQePc/29z/CVX7zG3o7uuEsTERkmHXcBtcbMWDJrMvOnNfDc9oP87IWd/PzFNv5k+UL+9PJFtE5tiLtEERFAATBh6jIpPnHeTC5ZMJXntx/kR7/dwY9+u4Orl87mpuULWX7OdILA4i5TRBJMATDBmuszXLV0Nss/MJ1NbR08vaWdx1/fy6ymHNd/ZC6fvmguH57XrDAQkYpTAFRIU12Gj39wBpctnsaOA8fZsreTNb99lx/8ZgfTGrN84rwWPnHeTJYvnsbMprq4yxWRBFAAVFgmFXDurMmcO2syXb39/P7Acd49eILHX9vLwy/tAmDelHqWLZrKxxZOZencJs6dNZnJdZnRd75pHWy4CzraoLkVVtwBF31ugt+RiJytFAAxqs+kOH9OE+fPaSLvzv6jPezu6GJPRzcbNu/nkVd2D/adO6WOC+Y0sWTWZBZOa2DB9AYWTGtgTnM9qcDCX/6/vA16o6uRO3aG66AQEJGiKh4AZnYN8DdACvihu99d6RqqUWDG7OY6ZjeHh3/cnc7uPg4c6+HA8ZMc7OzhlZ1HePKt/eT91PPSgTFvaj0PdX+Vlv4hH0XR20XvE/+dYx+4gab6TBgUIiKRigaAmaWA7wFXAm3AC2b2qLvrbutDmBlN9Rma6jOc03KqPZ93Onv66OjqHfw62tXL9P72ovtJde7mkq+tB2ByXZop9RmmNmaZ0pBlSn2G5voMj
bk0jdkUDbk0k3IpGrJpGqPHSbk0DdkUjbk0uXRALp0ilw40aS0ynmI6fFvpEcClwDZ33w5gZg8CKwEFQJmCwGiOfnEXOrZxFk09e4f1P5xp4V8vbKG7tz/86svT0dVLe2cP3b399PTlOdmXp69wWFGGdGDk0gHZgVDIBNSlU2TTAXWZU0GRSQWkU0Y6MNKpgEzKSAVGOggG28JHI5MKom02ZFv4PDMjZUZg4fchKLKcsqhfEK4XLod9jCA41S8wou0W7SfsZwbGwGO4n/AxbMcouc2ibBxYH6hjsI8pPKVAjIdvKx0A84CdBettwGUVrqEmPbPgC1z5zl+TyZ+66rg3qOP5xX/GxTOnjPr8/rzT25+ntz8MhN7+gvX+PL19Tl8+T3/e6cv7aY/hcp6+fJ7u7n4OnzjVnncn7+Ehrf684w79HrXno+W8M7b4qQ3DAoXSITIQNhQJplP9TgXN4GsMyZrTVodsLPW80/c4dNuphbL6FWkofN5I9RYG59D9lVvvSO2l9j+8pvLqLedFDVh9+CvMzA8/fMuGu2ouAIp9f0772TezW4BbABYsWHDGL7Rs0TQ+tnDqGT//7LMEXpuDFwwj0yvu4NoPf5Zr4y6tDPkoUMIgcfr6nb7+aDkKJ3fIFwTJwPJgwERhkx8MnjBk8gXbfOhzouXB/gXPdwdn4BHcgdPWfbB9YJ3B9VP98vni7YXrlNjf4OuOsM0JGwtrCCs9XcGmItuKR/DQ5sKoPm1/w/qV3reXWBn6Z0Cpesutafhrjf17M1Ltw75lJb6HpV5zQMvBA8U3dLSNuL/xUOkAaAPmF6y3ArsLO7j7/cD9AMuWLXtffxgmbqh90efO2jN+UikjlYIcqbhLEamse1rDwz5DNbdO+EtX+sPgXgCWmNliM8sCNwKPVrgGEZHqseIOyNSf3papD9snWEVHAO7eZ2Z/Bvya8DTQNe7+RiVrEBGpKgOj9gScBYS7/wr4VaVfV0SkasV0+Fb3AxARSSgFgIhIQikAREQSSgEgIpJQCgARkYRSAIiIJJQCQEQkoRQAIiIJZaU+BKoamFk78Pu464jMAEp8alNVUH1nrpprg+qur5prg+qubyJrW+juLaN1quoAqCZmttHdl8VdRymq78xVc21Q3fVVc21Q3fVVQ206BCQiklAKABGRhFIAlO/+uAsYheo7c9VcG1R3fdVcG1R3fbHXpjkAEZGE0ghARCShFACjMLP5ZvaUmW02szfM7Itx1zSUmaXM7GUzeyzuWoYysylm9pCZvRV9D/8w7poKmdl/jv5dXzezn5pZXYy1rDGz/Wb2ekHbNDNbb2Zbo8fYbnRdor7/Hf3bbjKzX5jZlGqqr2DbfzUzN7MZ1VSbmf25mW2J/g/+r0rXpQAYXR/wl+7+IWA5cKuZXRBzTUN9EdgcdxEl/A3w/9z9fOAjVFGdZjYPuA1Y5u4XEt6l7sYYS3oAuGZI25eBDe6+BNgQrcflAYbXtx640N0vAt4Gbq90UQUeYHh9mNl84ErgvUoXVOABhtRmZv8GWAlc5O5LgW9VuigFwCjcfY+7vxQtdxL+ApsXb1WnmFkr8Cngh3HXMpSZNQH/ClgN4O4n3f1IvFUNkwbqzSwNNAC74yrE3f8JODSkeSWwNlpeC9xQ0aIKFKvP3Z9w975o9Tlg4u9kXkKJ7x/APcBfAbFNeJao7T8Bd7t7T9Rnf6XrUgCMgZktAi4Bno+3ktN8h/A/dz7uQoo4B2gHfhQdovqhmTXGXdQAd99F+FfXe8AeoMPdn4i3qmFmufseCP8YAWbGXM9I/gPweNxFFDKz64Fd7v5q3LUUcS7wL83seTP7RzP7g0oXoAAok5lNAn4O/IW7H427HgAz+zSw391fjLuWEtLAR4Hvu/slwHHiPYRxmuh4+kpgMTAXaDSzP4m3qrOTmX2V8HDpT+KuZYCZNQBfBe6Iu5YS0sBUwkPL/w1YZ2ZWyQIUAGUwswzhL/+fuPvDcddT4HLgejN7F3gQuMLM/i7ekk7TBrS5+8CI6SHCQKgWnwR2uHu7u/cCDwP/IuaahtpnZnMAoseKHyYYjZmtAj4N/LFX13nlHyAM91ejn5FW4CUzmx1rVae0AQ976HeEo/iKTlIrAEYRJfJqYLO7fzvuegq5++3u3uruiwgnL59096r5C9bd9wI7zey8qGkF8GaMJQ31HrDczBqif+cVVNEkdeRRYFW0vAp4JMZahjGza4AvAde7+4m46ynk7q+5+0x3XxT9jLQBH43+X1aDvweuADCzc4EsFf7gOgXA6C4HbiL86/qV6Ou6uIs6i/w58BMz2wRcDPx1zPUMikYmDwEvAa8R/jzEdnWmmf0UeBY4z8zazOxm4G7gSjPbSngmy91VVt+9wGRgffSz8X+rrL6qUKK2NcA50amhDwKrKj2C0pXAIiIJpRGAiEhCKQBERBJKASAiklAKABGRhFIAiIgklAJARCShFAAiIgmlABARSaj/D6Gz7+yGqKSPAAAAAElFTkSuQmCC\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAG1xJREFUeJzt3X1wHPWd5/H3t+dJD7bkJ/lRfiIxEEwIJF7wHrm7HA6PSWGqNslxt0tce9RRdWGX7N3ebUJSBXck2SN3uZBNEXJFYgdnNxXiImQhVLjgNbAbskAwT+bBGBubYPlRfpJlW5Ilzff+6JY8lmakkZGmx9OfV5Vqun/9m57vyJY++vWve9rcHRERSZ4g7gJERCQeCgARkYRSAIiIJJQCQEQkoRQAIiIJpQAQEUkoBYCISEIpAEREEkoBICKSUOm4CxjJjBkzfNGiRXGXISJyVnnxxRcPuHvLaP2qOgAWLVrExo0b4y5DROSsYma/L6efDgGJiCSUAkBEJKEUACIiCaUAEBFJKAWAiEhCKQBERBJKASAiklA1GQB7Orr49hNb2N5+LO5SRESqVk0GwIHOk3z3yW2803487lJERKpWTQZAXSZ8W929/TFXIiJSvWo0AFIAdCkARERKqskAqM+GAaARgIhIaTUZAAMjAAWAiEhptRkA6fBtdZ3Mx1yJiEj1qskASKcCMinTHICIyAhqMgAAcumUDgGJiIyg7AAws5SZvWxmj0Xri83seTPbamY/M7Ns1J6L1rdF2xcV7OP2qH2LmV093m+mUC4dKABEREYwlhHAF4HNBevfBO5x9yXAYeDmqP1m4LC7fxC4J+qHmV0A3AgsBa4B7jOz1PsrvzQFgIjIyMoKADNrBT4F/DBaN+AK4KGoy1rghmh5ZbROtH1F1H8l8KC797j7DmAbcOl4vIlisulAcwAiIiModwTwHeCvgIHTaqYDR9y9L1pvA+ZFy/OAnQDR9o6o/2B7keeMuzAAdBaQiEgpowaAmX0a2O/uLxY2F+nqo2wb6TmFr3eLmW00s43t7e2jlVdSNqVDQCIiIylnBHA5cL2ZvQs8SHjo5zvAFDNLR31agd3RchswHyDa3gwcKmwv8pxB7n6/uy9z92UtLS1jfkMDMumArpMKABGRUkYNAHe/3d1b3X0R4STuk+7+x8BTwGeibquAR6LlR6N1ou1PurtH7TdGZwktBpYAvxu3dzJETnMAIiIjSo/epaQvAQ+a2deBl4HVUftq4G/NbBvhX/43Arj7G2a2DngT6ANudfcJ+w2tQ0AiIiMbUwC4+9PA09HydoqcxePu3cBnSzz/G8A3xlrkmcjpEJCIyIhq9krgrK4EFhEZUQ0HQEB3n04DFREppWYDIJcO6M87vf0KARGRYmo6AEB3BRMRKaVmAyCb1n2BRURGUrMBMDAC6NZNYUREiqrZAMimdWN4EZGR1HAA6BCQiMhIajYANAksIjIyBYCISELVbABkU+Fb61EAiIgUVbMBoBGAiMjIajYABiaBT+gD4UREiqrZAKjLRKeBKgBERIqq+QA43qMAEBEppmYDIBUY6cA40ds3emcRkQSq2QCAcB7ghEYAIiJF1XYApAKOn9QIQESkmJoOgExKIwARkVJqOgDSKdMIQESkhJoPAF0HICJSXE0HQCYION6jEYCISDG1HQBpBYCISCm1HQAp47gOAYmIFFXTAZBNBZzQJLCISFE1HQCZVEB3b5583uMuRUSk6tR8AIA+ElpEpJgaDwAD0LUAIiJF1HQADNwVTFcDi4gMV9MBkIluCqMRgIjIcLUdACndFUxEpJQaD4BoDkAXg4mIDFPjAaARgIhIKTUdAAOTwBoBiIgMV9MBoBGAiEhpNR4Aug5ARKSUmg6AVGAEpkNAIiLFjBoAZlZnZr8zs1fN7A0z+x9R+2Ize97MtprZz8wsG7XnovVt0fZFBfu6PWrfYmZXT9SbKng9cukUx7oVACIiQ5UzAugBrnD3jwAXA9eY2XLgm8A97r4EOAzcHPW/GTjs7h8E7on6YWYXADcCS4FrgPvMLDWeb6aYXDqgUwEgIjLMqAHgoWPRaib6cuAK4KGofS1wQ7S8Mlon2r7CzCxqf9Dde9x9B7ANuHRc3sUIMumATh0CEhEZpqw5ADNLmdkrwH5gPfAOcMTdB36ztgHzouV5wE6AaHsHML2wvchzJkw2FdDZ3TvRLyMictYpKwDcvd/dLwZaCf9q/1CxbtGjldhWqv00ZnaLmW00s43t7e3llDeibDrgaJdGACIiQ43pLCB3PwI8DSwHpphZOtrUCuyOltuA+QDR9mbgUGF7kecUvsb97r7M3Ze1tLSMpbyisqmAzh6NAEREhirnLKAWM5sSLdcDnwQ2A08Bn4m6rQIeiZYfjdaJtj/p7h613xidJbQYWAL8brzeSCnZdKCzgEREikiP3oU5wNrojJ0AWOfuj5nZm8CDZvZ14GVgddR/NfC3ZraN8C//GwHc/Q0zWwe8CfQBt7r7hF+iO3AWkLsTzkWLiAiUEQDuvgm4pEj7doqcxePu3cBnS+zrG8A3xl7mmcumA/ryTk9fnrrMhJ91KiJy1qjpK4EhDABA1wKIiAxR8wGQGwwATQSLiBSq+QD4w+NP8kz2NhZ/rxXuuRA2rYu7JBGRqlDOJPBZ67z9j/PJfd8iG/SEDR074Ze3hcsXfS6+wkREqkBNjwA+/t59ZL3n9MbeLthwVzwFiYhUkZoOgMk9+4pv6GirbCEiIlWopgOgMzer+Ibm1soWIiJShWo6AJ5Z8AV6g7rTGzP1sOKOeAoSEakiNT0JvGXmtQB85O3vMtcOYs2t4S9/TQCLiNR2AEAYAl/aej7XXTiHb37morjLERGpGjV9CGiAPhFURGS4RARALh3Q0aUAEBEplIwAyKQ4fFwBICJSKBEBUJcOOHLiZNxliIhUlUQEQC6T0iEgEZEhEhEAdZmA4yf76e3Px12KiEjVSEYApMMbwWgUICJySjICILoT2JETCgARkQEJCYDwbXZ0aSJYRGRAIgIgpxGAiMgwiQiAuui2kAoAEZFTkhEAAyMATQKLiAxKRADk0gEGdOhiMBGRQYkIADOjLpPSCEBEpEAiAgCgPpPSHICISIHEBEAuE2gEICJSIDEBkE0FHDmuOQARkQGJCYBcJuCQJoFFRAYlJgAasmkOawQgIjIoMQFQn01x/GQ/3b39cZciIlIVEhMADdHFYAc1ChARAZIUANkoAI71xFyJiEh1SEwA1Gc1AhARKZScABg4BHRMASAiAgkKgIZsGtAhIBGRAYkJgEzKSAemQ0AiIpHEBICZ0ZhLc0AjABERoIwAMLP5ZvaUmW02szfM7ItR+zQzW29mW6PHqVG7mdl3zWybmW0ys48W7GtV1H+rma2auLdVXH0mpTkAEZFIOSOAPuAv3f1DwHLgVjO7APgysMHdlwAbonWAa4El0dctwPchDAzgTuAy4FLgzoHQqJS6TKARgIhIZNQAcPc97v5StNwJbAbmASuBtVG3tcAN0fJK4Mceeg6YYmZzgK
uB9e5+yN0PA+uBa8b13YyiPptSAIiIRMY0B2Bmi4BLgOeBWe6+B8KQAGZG3eYBOwue1ha1lWof+hq3mNlGM9vY3t4+lvJG1ZBJc+j4Sdx9XPcrInI2KjsAzGwS8HPgL9z96Ehdi7T5CO2nN7jf7+7L3H1ZS0tLueWVpT6borff6ezpG9f9ioicjcoKADPLEP7y/4m7Pxw174sO7RA97o/a24D5BU9vBXaP0F4xjbnwYrD9R7sr+bIiIlWpnLOADFgNbHb3bxdsehQYOJNnFfBIQfvno7OBlgMd0SGiXwNXmdnUaPL3qqitYiblwovB9h3VPICISLqMPpcDNwGvmdkrUdtXgLuBdWZ2M/Ae8Nlo26+A64BtwAngTwHc/ZCZfQ14Iep3l7sfGpd3UabGKAD2dmgEICIyagC4+zMUP34PsKJIfwduLbGvNcCasRQ4ngZHAJ0KABGRxFwJDJBJBdRlAvZpBCAikqwAgHAUoDkAEZEEBkBDNs1enQUkIpK8AGjMpRQAIiIkMQCyado7e8jndTWwiCRb4gJgUi5Nf951XwARSbzkBUDdwMVgOgwkIsmWuABozOpiMBERSGAATI5GALuOdMVciYhIvBIXAA3ZFOnAaDt8Iu5SRERilbgAMDOa6zO0HdYIQESSLXEBAOFE8E6NAEQk4RIZAE11GdoOaQQgIsmW0ABIc6Srl2O6M5iIJFgiA2BF7z/yTPY2Gv/nDLjnQti0Lu6SREQqrpwbwtSU8/Y/zifb/w/ZIPpE0I6d8MvbwuWLPhdfYSIiFZa4EcDH37uPrA/5OOjeLthwVzwFiYjEJHEBMLlnX/ENHW2VLUREJGaJC4DO3KziG5pbK1uIiEjMEhcAzyz4Ar1B3emNmXpYcUc8BYmIxCRxk8BbZl4LwLJ37mVGfzs2ZR624k5NAItI4iQuACAMgV/0Xc76zft46qZPsHhGY9wliYhUXOIOAQ2Y0pABYMeBYzFXIiISj8QGwNTGLADb24/HXImISDwSGwD1mRT1mRTbDygARCSZEhsAEB4G2qERgIgkVOIDYOv+zrjLEBGJRaIDYMakHAeOneTgsZ7RO4uI1JjEBwDAlr0aBYhI8iQ8AMIzgTYrAEQkgRIdAA3ZNJNyad7aczTuUkREKi7RAQAwrTHLZgWAiCRQ4gNgxqQsW/cfo68/H3cpIiIVpQCYlKOnL8+7B3U9gIgkS+IDoGVyeCbQpraOmCsREamsxAfAtMYs2XTAKzuPxF2KiEhFJT4AAjNmTc7x8nsKABFJllEDwMzWmNl+M3u9oG2ama03s63R49So3czsu2a2zcw2mdlHC56zKuq/1cxWTczbOTMzm+rYvOco3b39cZciIlIx5YwAHgCuGdL2ZWCDuy8BNkTrANcCS6KvW4DvQxgYwJ3AZcClwJ0DoVENZjfV0Zd33tit00FFJDlGDQB3/yfg0JDmlcDaaHktcENB+4899BwwxczmAFcD6939kLsfBtYzPFRiM7s5vEfwq5oHEJEEOdM5gFnuvgcgepwZtc8Ddhb0a4vaSrUPY2a3mNlGM9vY3t5+huWNzaRcmqa6NC/+/nBFXk9EpBqM9ySwFWnzEdqHN7rf7+7L3H1ZS0vLuBY3krlT6vnndw7gXrQsEZGac6YBsC86tEP0uD9qbwPmF/RrBXaP0F41WqfWc/hEL2/v0z2CRSQZzjQAHgUGzuRZBTxS0P756Gyg5UBHdIjo18BVZjY1mvy9KmqrGvOnNgDw7DsHYq5ERKQyyjkN9KfAs8B5ZtZmZjcDdwNXmtlW4MpoHeBXwHZgG/AD4AsA7n4I+BrwQvR1V9RWNZrqMzTXZ3h2+8G4SxERqYj0aB3c/d+V2LSiSF8Hbi2xnzXAmjFVV2HzptTz7DsH6c87qaDYtIWISO1I/JXAhRZOb+Bodx8vvaezgUSk9ikACiyc3kBg8A+b98VdiojIhFMAFMilU7RObeAf3lQAiEjtUwAMsXhGI++0H+fdA7o/gIjUNgXAEItnNALw6zf2xlyJiMjEUgAM0VyfYXZTHX//yq64SxERmVAKgCLOmz2ZzXs62bqvM+5SREQmjAKgiCUzJxEYGgWISE1TABTRmEszf1oDD7+0i/68PhxORGqTAqCEpXOa2NPRzVNv7R+9s4jIWUgBUMI5LZOYXJdm7bPvxl2KiMiEUACUkAqMC+c285utB9jero+IFpHaowAYwdK5TaQC4we/2R53KSIi404BMILGXJqlc5pYt7GNtsMn4i5HRGRcKQBGsWzRVADue/qdmCsRERlfCoBRTK7LcMGcJta9sJMd+nwgEakhCoAyXLZ4GqnA+Npjb8ZdiojIuFEAlKExl+YPFk3jybf28/QWXRcgIrVBAVCmi+dPYVpjlq/+4nU6u3vjLkdE5H1TAJQpFRgrzp/J7o4uvv7Y5rjLERF53xQAYzB3Sj0fWzCVn23cyeOv7Ym7HBGR90UBMEbLz5nOnOY6/su6V9myVx8XLSJnLwXAGKUC47oL55AKjP/4440cONYTd0kiImdEAXAGJtWlue7Ds9nT0cVNq5+no0uTwiJy9lEAnKE5zfV86sNzeHvfMT6/5nmOnDgZd0kiImOiAHgfFk5v5NoLZ/P6rqP80ff/mV1HuuIuSUSkbAqA9+kDLZO44eK57Drcxcp7n+G57QfjLklEpCwKgHHQOrWBz3yslbzDv//Bc9z75Fb6+vNxlyUiMiIFwDiZPinHv102nyUzJ/GtJ97mhu/9ltd3dcRdlohISQqAcZRNB1y9dDbXXTibHQePc/29z/CVX7zG3o7uuEsTERkmHXcBtcbMWDJrMvOnNfDc9oP87IWd/PzFNv5k+UL+9PJFtE5tiLtEERFAATBh6jIpPnHeTC5ZMJXntx/kR7/dwY9+u4Orl87mpuULWX7OdILA4i5TRBJMATDBmuszXLV0Nss/MJ1NbR08vaWdx1/fy6ymHNd/ZC6fvmguH57XrDAQkYpTAFRIU12Gj39wBpctnsaOA8fZsreTNb99lx/8ZgfTGrN84rwWPnHeTJYvnsbMprq4yxWRBFAAVFgmFXDurMmcO2syXb39/P7Acd49eILHX9vLwy/tAmDelHqWLZrKxxZOZencJs6dNZnJdZnRd75pHWy4CzraoLkVVtwBF31ugt+RiJytFAAxqs+kOH9OE+fPaSLvzv6jPezu6GJPRzcbNu/nkVd2D/adO6WOC+Y0sWTWZBZOa2DB9AYWTGtgTnM9qcDCX/6/vA16o6uRO3aG66AQEJGiKh4AZnYN8DdACvihu99d6RqqUWDG7OY6ZjeHh3/cnc7uPg4c6+HA8ZMc7OzhlZ1HePKt/eT91PPSgTFvaj0PdX+Vlv4hH0XR20XvE/+dYx+4gab6TBgUIiKRigaAmaWA7wFXAm3AC2b2qLvrbutDmBlN9Rma6jOc03KqPZ93Onv66OjqHfw62tXL9P72ovtJde7mkq+tB2ByXZop9RmmNmaZ0pBlSn2G5voMjbk0jdkUDbk0k3IpGrJpGqPHSbk0DdkUjbk0uXRALp0ilw40aS0ynmI6fFvpEcClw
DZ33w5gZg8CKwEFQJmCwGiOfnEXOrZxFk09e4f1P5xp4V8vbKG7tz/86svT0dVLe2cP3b399PTlOdmXp69wWFGGdGDk0gHZgVDIBNSlU2TTAXWZU0GRSQWkU0Y6MNKpgEzKSAVGOggG28JHI5MKom02ZFv4PDMjZUZg4fchKLKcsqhfEK4XLod9jCA41S8wou0W7SfsZwbGwGO4n/AxbMcouc2ibBxYH6hjsI8pPKVAjIdvKx0A84CdBettwGUVrqEmPbPgC1z5zl+TyZ+66rg3qOP5xX/GxTOnjPr8/rzT25+ntz8MhN7+gvX+PL19Tl8+T3/e6cv7aY/hcp6+fJ7u7n4OnzjVnncn7+Ehrf684w79HrXno+W8M7b4qQ3DAoXSITIQNhQJplP9TgXN4GsMyZrTVodsLPW80/c4dNuphbL6FWkofN5I9RYG59D9lVvvSO2l9j+8pvLqLedFDVh9+CvMzA8/fMuGu2ouAIp9f0772TezW4BbABYsWHDGL7Rs0TQ+tnDqGT//7LMEXpuDFwwj0yvu4NoPf5Zr4y6tDPkoUMIgcfr6nb7+aDkKJ3fIFwTJwPJgwERhkx8MnjBk8gXbfOhzouXB/gXPdwdn4BHcgdPWfbB9YJ3B9VP98vni7YXrlNjf4OuOsM0JGwtrCCs9XcGmItuKR/DQ5sKoPm1/w/qV3reXWBn6Z0Cpesutafhrjf17M1Ltw75lJb6HpV5zQMvBA8U3dLSNuL/xUOkAaAPmF6y3ArsLO7j7/cD9AMuWLXtffxgmbqh90efO2jN+UikjlYIcqbhLEamse1rDwz5DNbdO+EtX+sPgXgCWmNliM8sCNwKPVrgGEZHqseIOyNSf3papD9snWEVHAO7eZ2Z/Bvya8DTQNe7+RiVrEBGpKgOj9gScBYS7/wr4VaVfV0SkasV0+Fb3AxARSSgFgIhIQikAREQSSgEgIpJQCgARkYRSAIiIJJQCQEQkoRQAIiIJZaU+BKoamFk78Pu464jMAEp8alNVUH1nrpprg+qur5prg+qubyJrW+juLaN1quoAqCZmttHdl8VdRymq78xVc21Q3fVVc21Q3fVVQ206BCQiklAKABGRhFIAlO/+uAsYheo7c9VcG1R3fdVcG1R3fbHXpjkAEZGE0ghARCShFACjMLP5ZvaUmW02szfM7Itx1zSUmaXM7GUzeyzuWoYysylm9pCZvRV9D/8w7poKmdl/jv5dXzezn5pZXYy1rDGz/Wb2ekHbNDNbb2Zbo8fYbnRdor7/Hf3bbjKzX5jZlGqqr2DbfzUzN7MZ1VSbmf25mW2J/g/+r0rXpQAYXR/wl+7+IWA5cKuZXRBzTUN9EdgcdxEl/A3w/9z9fOAjVFGdZjYPuA1Y5u4XEt6l7sYYS3oAuGZI25eBDe6+BNgQrcflAYbXtx640N0vAt4Gbq90UQUeYHh9mNl84ErgvUoXVOABhtRmZv8GWAlc5O5LgW9VuigFwCjcfY+7vxQtdxL+ApsXb1WnmFkr8Cngh3HXMpSZNQH/ClgN4O4n3f1IvFUNkwbqzSwNNAC74yrE3f8JODSkeSWwNlpeC9xQ0aIKFKvP3Z9w975o9Tlg4u9kXkKJ7x/APcBfAbFNeJao7T8Bd7t7T9Rnf6XrUgCMgZktAi4Bno+3ktN8h/A/dz7uQoo4B2gHfhQdovqhmTXGXdQAd99F+FfXe8AeoMPdn4i3qmFmufseCP8YAWbGXM9I/gPweNxFFDKz64Fd7v5q3LUUcS7wL83seTP7RzP7g0oXoAAok5lNAn4O/IW7H427HgAz+zSw391fjLuWEtLAR4Hvu/slwHHiPYRxmuh4+kpgMTAXaDSzP4m3qrOTmX2V8HDpT+KuZYCZNQBfBe6Iu5YS0sBUwkPL/w1YZ2ZWyQIUAGUwswzhL/+fuPvDcddT4HLgejN7F3gQuMLM/i7ekk7TBrS5+8CI6SHCQKgWnwR2uHu7u/cCDwP/IuaahtpnZnMAoseKHyYYjZmtAj4N/LFX13nlHyAM91ejn5FW4CUzmx1rVae0AQ976HeEo/iKTlIrAEYRJfJqYLO7fzvuegq5++3u3uruiwgnL59096r5C9bd9wI7zey8qGkF8GaMJQ31HrDczBqif+cVVNEkdeRRYFW0vAp4JMZahjGza4AvAde7+4m46ynk7q+5+0x3XxT9jLQBH43+X1aDvweuADCzc4EsFf7gOgXA6C4HbiL86/qV6Ou6uIs6i/w58BMz2wRcDPx1zPUMikYmDwEvAa8R/jzEdnWmmf0UeBY4z8zazOxm4G7gSjPbSngmy91VVt+9wGRgffSz8X+rrL6qUKK2NcA50amhDwKrKj2C0pXAIiIJpRGAiEhCKQBERBJKASAiklAKABGRhFIAiIgklAJARCShFAAiIgmlABARSaj/D6Gz7+yGqKSPAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -396,7 +396,7 @@ "source": [ "### Outliers: solution\n", "\n", - "Compute the estimator conditioned on the event the the top most transaction captures the first slot. This decreases `total_weight` on the one hand (thus increasing `p`), while increasing `inclusion_interval` on the other, by capturing a block slot. If this estimator gives lower prediction times we switch to it, and then repeat the process with the next highest transaction. The process convegres when the estimator is no longer improving or if all block slots are captured. " + "Compute the estimator conditioned on the event the top most transaction captures the first slot. This decreases `total_weight` on the one hand (thus increasing `p`), while increasing `inclusion_interval` on the other, by capturing a block slot. If this estimator gives lower prediction times we switch to it, and then repeat the process with the next highest transaction. The process converges when the estimator is no longer improving or if all block slots are captured. " ] }, { @@ -406,7 +406,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHmVJREFUeJzt3X2QXXWd5/H39z72U/op6TzQnZggEVREwR7Ah3IdIw5RxzCzYuG4Gl1qU7MwPoyzpehUyYxTM+PUuAO6A9REQeMui7DImjgLg5mAhSgBwlMMhJAQIOkkJB066Tx2p/ve7/5xTie307e7k7597+m+5/OqunXP+Z3fOed7fehPfufR3B0REYmfRNQFiIhINBQAIiIxpQAQEYkpBYCISEwpAEREYkoBICISUwoAEZGYGjcAzOwOM9tnZpuKLPtvZuZmNiucNzP7vpltM7ONZnZJQd/lZrY1/Cyf3J8hIiJn60xGAD8Grjy90czmA1cAOwqalwKLw88K4LawbytwI3AZcClwo5m1lFK4iIiUJjVeB3d/xMwWFll0E/A1YHVB2zLgJx7cXrzezJrNbB7wQWCtu/cAmNlaglC5a6x9z5o1yxcuLLZrEREZzVNPPbXf3dvG6zduABRjZp8Adrn7c2ZWuKgd2Fkw3xW2jdY+poULF7Jhw4aJlCgiEltm9tqZ9DvrADCzOuAvgY8UW1ykzcdoL7b9FQSHj1iwYMHZliciImdoIlcBvRlYBDxnZq8CHcDTZjaX4F/28wv6dgC7x2gfwd1Xununu3e2tY07ghERkQk66wBw99+5+2x3X+juCwn+uF/i7q8Da4DPhVcDXQ70uvse4EHgI2bWEp78/UjYJiIiETmTy0DvAh4DzjezLjO7dozu9wPbgW3AD4DrAMKTv38DPBl+vj10QlhERKJhU/l9AJ2dna6TwCIiZ8fMnnL3zvH66U5gEZGYUgCIiMRUVQbA4b4Bblr7Es/uPBh1KSIiU1ZVBkAu73xv3Vaefu1A1KWIiExZVRkA9dng/rYj/YMRVyIiMnVVZQCkkwlq0gkFgIjIGKoyACAYBRzuUwCIiIymegMgk+Jw30DUZYiITFlVGwC1maQOAYmIjKFqA6Auk+SIDgGJiIyqqgPgkA4BiYiMqooDQCeBRUTGUsUBoHMAIiJjqeoAONo/yFR+2qmISJSqNgDqsynyDsdO5KIuRURkSqraAKjLJAE9DkJEZDRVGwD1meB5QDoRLCJSXNUGgEYAIiJjq9oAGHoiqB4HISJSXNUGwMkRgA4BiYgUNW4AmNkdZrbPzDYVtP2jmb1oZhvN7P+aWXPBsm+Y2TYz22Jmf1DQfmXYts3Mbpj8nzJc3dA5AB0CEhEp6kxGAD8GrjytbS1wobtfBLwEfAPAzN4GXAO8PVznVjNLmlkSuAVYCrwN+HTYt2zqssEIQCeBRUSKGzcA3P0RoOe0tl+6+9Bf1vVARzi9DPipu/e7+yvANuDS8LPN3be7+wngp2Hfshm6CkiHgEREipuMcwD/GXggnG4HdhYs6wrbRmsvm2TCSCeNI/06CSwiUkxJAWBmfwkMAncONRXp5mO0F9vmCjPbYGYburu7SymPbErPAxIRGc2EA8DMlgMfBz7jpx640wXML+jWAeweo30Ed1/p7p3u3tnW1jbR8gDIpBIc0iEgEZGiJhQAZnYl8HXgE+5+rGDRGuAaM8ua2SJgMfAE8CSw2MwWmVmG4ETxmtJKH186aToHICIyitR4HczsLuCDwCwz6wJuJLjqJwusNTOA9e7+p+7+vJndA7xAcGjoenfPhdv5M+BBIAnc4e7Pl+H3DJNJJjh0XOcARESKGTcA3P3TRZpvH6P/3wJ/W6T9fuD+s6quRNl0kl4FgIhIUVV7JzBATSqhABARGUVVB0A2HbwXWC+FEREZqboDIJVgIOf0DeSjLkVEZMqp6gCoSQWPg9BhIBGRkao6ALLp4OcpAERERqruAEgpAERERlPVAVCT1iEgEZHRVHUAaAQgIjK6qg4AjQBEREZX1QGQ0QhARGRUVR0ACTNq0noekIhIMVUdABC8E0ABICIyUgwCQM8DEhEpRgEgIhJTVR8AmVSCg8cUACIip6v6AKjROwFERIqq+gDIphIc6lMAiIicrvoDIJ2kfzBP30Au6lJERKaUqg+AmvBmMF0KKiIyXNUHQG34OIgDOhEsIjLMuAFgZneY2T4z21TQ1mpma81sa/jdErabmX3fzLaZ2UYzu6RgneVh/61mtrw8P2ekoecB9Rw9UaldiohMC2cyAvgxcOVpbTcA69x9MbAunAdYCiwOPyuA2yAIDOBG4DLgUuDGodAot9rM0AhAASAiUmjcAHD3R4Ce05qXAavC6VXAVQXtP/HAeqDZzOYBfwCsdfcedz8ArGVkqJRFrUYAIiJFTfQcwBx33wMQfs8O29uBnQX
9usK20drLbugQ0AEFgIjIMJN9EtiKtPkY7SM3YLbCzDaY2Ybu7u6SC0omjGwqQY8OAYmIDDPRANgbHtoh/N4XtncB8wv6dQC7x2gfwd1Xununu3e2tbVNsLzh6jJJjQBERE4z0QBYAwxdybMcWF3Q/rnwaqDLgd7wENGDwEfMrCU8+fuRsK0iatJJenQZqIjIMKnxOpjZXcAHgVlm1kVwNc93gHvM7FpgB3B12P1+4KPANuAY8AUAd+8xs78Bngz7fdvdTz+xXDbZVIKeo/2V2p2IyLQwbgC4+6dHWbSkSF8Hrh9lO3cAd5xVdZOkNp3UVUAiIqep+juBIbgX4MBRHQISESkUiwCoSSc5PpDTA+FERArEIgB0M5iIyEjxCICMAkBE5HSxCICTdwPrZjARkZNiEQA6BCQiMlKsAkB3A4uInBKLAMimExgaAYiIFIpFACTMqMsm6T6iABARGRKLAACoy6TYf0SPgxARGRKLADh/3wP8YvBP+ZftH4abLoSN90RdkohI5MZ9FtB0d/6+B7ji5b8j7X1BQ+9O+MWXgumLPhVdYSIiEav6EcD7d9xKOt83vHHgOKz7djQFiYhMEVUfADP69xZf0NtV2UJERKaYqg+Aw9k5xRc0dVS2EBGRKabqA+DRBdcxkKgZ3piuhSXfiqYgEZEpoupPAm+ZvRSA97x6C00n9tFXN4+6pX+tE8AiEntVPwKAIARue9dqzu2/k7vff7/++IuIEJMAAKhJJ0gYdB/WzWAiIhCjADAzGrK6G1hEZEhJAWBmf25mz5vZJjO7y8xqzGyRmT1uZlvN7G4zy4R9s+H8tnD5wsn4AWejNpPUCEBEJDThADCzduBLQKe7XwgkgWuAfwBucvfFwAHg2nCVa4ED7n4ecFPYr6Jq00n2KQBERIDSDwGlgFozSwF1wB7gQ8C94fJVwFXh9LJwnnD5EjOzEvd/VuoyKQWAiEhowgHg7ruA7wI7CP7w9wJPAQfdfTDs1gW0h9PtwM5w3cGw/8yJ7n8iGrIpeo6cYDCXr+RuRUSmpFIOAbUQ/Kt+EXAOUA8sLdLVh1YZY1nhdleY2QYz29Dd3T3R8oqqzybJubNf7wUQESnpENCHgVfcvdvdB4D7gPcCzeEhIYAOYHc43QXMBwiXNwE9p2/U3Ve6e6e7d7a1tZVQ3kgNNUFZe3qPT+p2RUSmo1ICYAdwuZnVhcfylwAvAA8Dnwz7LAdWh9NrwnnC5Q+5+4gRQDnNyKYBeL23b5yeIiLVr5RzAI8TnMx9GvhduK2VwNeBr5rZNoJj/LeHq9wOzAzbvwrcUELdE9KQHRoBKABEREp6FpC73wjceFrzduDSIn37gKtL2V+patIJUglj7yEFgIhIbO4EhuBu4Bk1KY0ARESIWQAA1GdTOgksIkJsA0AjABGR2AVAQzbF3kN95PMVvQBJRGTKiWUADOScnmO6GUxE4i2WAQC6F0BEJH4BUKN7AUREIIYBMCMcAew+qCuBRCTeYhcAdZkkqYTRdeBY1KWIiEQqdgFgZjTVptnZoxGAiMRb7AIAgvMAO3o0AhCReItlADTWpNmpQ0AiEnOxDICm2jSH+wbpPT4QdSkiIpGJZQA0hpeC6kSwiMRZPAOgNngxjE4Ei0icxToANAIQkTiLZQDUpBJkUwm6DmgEICLxFcsAMLPgSiBdCioiMRbLAACYoXsBRCTmYhsATXVpdvQc03sBRCS2SgoAM2s2s3vN7EUz22xm7zGzVjNba2Zbw++WsK+Z2ffNbJuZbTSzSybnJ0xMS22G/sE8e/SCeBGJqVJHAN8D/s3dLwDeCWwGbgDWuftiYF04D7AUWBx+VgC3lbjvkjTXBVcCvdJ9NMoyREQiM+EAMLNG4APA7QDufsLdDwLLgFVht1XAVeH0MuAnHlgPNJvZvAlXXqKW+gwAr+w/ElUJIiKRKmUEcC7QDfzIzJ4xsx+aWT0wx933AITfs8P+7cDOgvW7wrZI1GeSZJIJXtYIQERiqpQASAGXALe5+8XAUU4d7inGirSNOANrZivMbIOZbeju7i6hvLGZGS31aV7ZrwAQkXgqJQC6gC53fzycv5cgEPYOHdoJv/cV9J9fsH4HsPv0jbr7SnfvdPfOtra2EsobX1NNmu3dOgQkIvE04QBw99eBnWZ2fti0BHgBWAMsD9uWA6vD6TXA58KrgS4HeocOFUWluT7DroPH6R/MRVmGiEgkUiWu/0XgTjPLANuBLxCEyj1mdi2wA7g67Hs/8FFgG3As7Buplro0eYedPcc4b/aMqMsREamokgLA3Z8FOossWlKkrwPXl7K/ydZcF1wJ9HL3UQWAiMRObO8EBmgNA2DbPp0HEJH4iXUAZFIJmmrTvPj64ahLERGpuFgHAEBrfYYX9xyKugwRkYqLfQDMrM+wff9RTgzmoy5FRKSiFAANGXJ5Z7seCSEiMRP7AJjVkAVgi84DiEjMxD4AWuoyJEwBICLxE/sASCaM1vqMAkBEYif2AQDBlUAv6EogEYkZBQCwzH7D/zm+Av+rZrjpQth4T9QliYiUXanPApr2zt/3AB/uvZlMoj9o6N0Jv/hSMH3Rp6IrTESkzGI/Anj/jlvJeP/wxoHjsO7b0RQkIlIhsQ+AGf17iy/o7apsISIiFRb7ADicnVN8QVNHZQsREamw2AfAowuuYyBRM7wxXQtLvhVNQSIiFRL7k8BbZi8F4PJXbqF5YB/99fOovfKvdQJYRKpe7EcAEITAynev5s39d/Ivl6zWH38RiQUFQCibSjKrIcNTrx2IuhQRkYpQABSY11TLU68dYDCnR0OLSPVTABQ4p7mWYydyekOYiMRCyQFgZkkze8bM/jWcX2Rmj5vZVjO728wyYXs2nN8WLl9Y6r4n2znNwdVAT77aE3ElIiLlNxkjgC8Dmwvm/wG4yd0XAweAa8P2a4ED7n4ecFPYb0qZUZOmqTbNhld1HkBEql9JAWBmHcDHgB+G8wZ8CLg37LIKuCqcXhbOEy5fEvafUuY21vDEKz24e9SliIiUVakjgJuBrwFDZ01nAgfdfTCc7wLaw+l2YCdAuLw37D+lnNNcQ/eRfl5941jUpYiIlNWEA8DMPg7sc/enCpuLdPUzWFa43RVmtsHMNnR3d0+0vAmb31oHwKNbK79vEZFKKmUE8D7gE2b2KvBTgkM/NwPNZjZ0h3EHsDuc7gLmA4TLm4ARZ1vdfaW7d7p7Z1tbWwnlTUxzbXAe4JGt+yu+bxGRSppwALj7N9y9w90XAtcAD7n7Z4CHgU+G3ZYDq8PpNeE84fKHfAoeaDcz5rfU8tjLbzCg+wFEpIqV4z6ArwNfNbNtBMf4bw/bbwdmhu1fBW4ow74nxYLWOo70D/LczoNRlyIiUjaT8jA4d/8V8KtwejtwaZE+fcDVk7G/cpvfWocZPLJ1P50LW6MuR0SkLHQncBE16SRzG2v41ZZ9UZciIlI2CoBRLJxZz8auXl7v7Yu6FBGRslAAjOLNbfUArN08yisjRUSmOQXAKFrrM7TWpXlw0+tRlyIiUhYKgFGYGYvaGnhs+xv0Hh+Iuh
wRkUmnABjDm9vqyeWdh17UYSARqT4KgDHMbayhsSbF6md2j99ZRGSaUQCMwcx4y5wZ/HrrfroP90ddjojIpFIAjOOCuTPIufOL5zQKEJHqogAYx8yGLHMas9z3dFfUpYiITCoFwBl4y5wZbNp9iC16V7CIVBEFwBl469xGUgnjf65/NepSREQmjQLgDNRmkiye3cB9T+/icJ/uCRCR6qAAOEMXdTRz7ESO+57eFXUpIiKTQgFwhuY21TC3sYZVv32VfH7KvcdGROSsKQDOwjvnN7F9/1E9IE5EqoIC4Cy8ZfYMmuvS/PND25iCb7MUETkrCoCzkEgY717Qwu929fJrvTReRKY5BcBZumDeDGbUpLj531/SKEBEpjUFwFlKJRL83sJWnt5xkAef17kAEZm+JhwAZjbfzB42s81m9ryZfTlsbzWztWa2NfxuCdvNzL5vZtvMbKOZXTJZP6LS3j6vkZn1Gf7+gc0M5PJRlyMiMiGljAAGgb9w97cClwPXm9nbgBuAde6+GFgXzgMsBRaHnxXAbSXsO1KJhPHe82by2hvHuHP9a1GXIyIyIRMOAHff4+5Ph9OHgc1AO7AMWBV2WwVcFU4vA37igfVAs5nNm3DlEVs0s54FrXV895cvsfeQXhwvItPPpJwDMLOFwMXA48Acd98DQUgAs8Nu7cDOgtW6wrZpycz4/fPb6BvI8Vdrno+6HBGRs1ZyAJhZA/Az4CvufmisrkXaRlxGY2YrzGyDmW3o7u4utbyyaq7LcOmiVh7Y9DoPPq+Xx4vI9FJSAJhZmuCP/53ufl/YvHfo0E74vS9s7wLmF6zeAYx4y4q7r3T3TnfvbGtrK6W8irhkQQttM7Lc8LON7NOhIBGZRkq5CsiA24HN7v5PBYvWAMvD6eXA6oL2z4VXA10O9A4dKprOkgnjyrfP5Uj/IF+5+1k9J0hEpo1SRgDvAz4LfMjMng0/HwW+A1xhZluBK8J5gPuB7cA24AfAdSXse0pprc/wgcVt/PblN7jl4W1RlyMickZSE13R3R+l+HF9gCVF+jtw/UT3N9W9/ZxGdh08zn9f+xKL5zRw5YXT9gInEYkJ3Qk8ScyMJRfMZl5TDV+5+1k27eqNuiQRkTEpACZRKpngY++YRyaV4HN3PMHL3UeiLklEZFQKgElWn01x1Tvb6R/I8Sc/WM+ON45FXZKISFEKgDJoqc9w1cXtHDo+yKdWPsa2fYejLklEZAQFQJnMasjyRxe3c7hvgP9422M89dqBqEsSERlGAVBGbTOyXP3u+SQM/uQH6/n5M3qhvIhMHQqAMmuqTfPJd3cwqyHLV+5+lhtXb+LEoB4hLSLRUwBUQF0mxR9d3M7FC5pZ9dhr/PGtv2HL6zovICLRUgBUSDJhfGBxGx97xzy27z/Kx//Hr7nl4W16oYyIREYBUGHnzW7gM5ctYOHMev7xwS1cefMj/GrLvvFXFBGZZAqACNRlUnz0HfP4w4vm0XP0BJ//0ZN8/o4n2Nh1MOrSRCRGJvwsICnduW0NLJhZx3M7e1n/yht84p9/w++f38YXlyzmkgUtUZcnIlVOARCxVCLBu9/UwoXtjTzX1cvjr/Tw8K2/5Z0dTXz2PQv5+EXzqEkng84b74F134beLmjqgCXfgos+Fe0PEJFpy4KHdE5NnZ2dvmHDhgmt+9Lew/y/jdPvdQMnBvO8sOcQm3b18sbREzTVpln2rnP4/IwnWPTYN7GB46c6p2vhD7+vEBCRYczsKXfvHK+fzgFMMZlUgnfNb+Yzly3gjy9uZ3Zjlv/9+A4yv/rb4X/8AQaOByMCEZEJ0CGgKcrMmN9ax/zWOvoHc7Q//kbRft7bxe6Dx2lvrq1whSIy3SkApoFsKsnh7Bwa+0e+eH5Xfibv/85DzGuq4bJFrfzeolYuWdDCebMbSCc1wBOR0SkApolHF1zHFS//Hen8qRfPDyRqeHT+f+U/WBu7Dh7n3zfv4+fP7gYgk0xw/twZXNjexNvPaeSt8xo5r62Bprp0VD9BRKYYBcA0sWX2UgDev+NWZvTv5XB2Do8uuI49s5fyLuBd85txdw4eH2DvoT66D/fTfaSfnz+zi7ue2HFyOy11ac6b3cCb24LPm2bW0d5SS0dzHY21KcxGe8uniFQbBcA0smX20pNBUIyZ0VKXoaUuwwVzgzZ353DfIPuP9HPg2AAHjp1gz8E+Xth9iKMncsPWr8skaW+upaOllnOaa5nXVMOshixtM4LPrIbgk0np0JJINah4AJjZlcD3gCTwQ3f/TqVriBMzo7E2TWPtyEM/fQM5eo8PcKhvgMN9gxw+Psjh/gFe2HOI9dt7OD6QK7JFaKxJ0TYjy8yGLE21aZpr0zSFn+a6YF/NdZmTbTNqUtRnUtSkExphiEwhFQ0AM0sCtwBXAF3Ak2a2xt1fqGQdEqhJJ6lJJ5nTWFN0+WAuz7ETufAzyLETOY6G38f6c+w6cJxX9h+lfyDH8YEcA7mx7ylJGNRmktRnUtRnU9RnkzRkg3Coy6ZoyCapy6SoTSfJphLUpJNk0wlqUsF3NjV8/uR3Qf900kgnEiQSChqZRiK6ybPSI4BLgW3uvh3AzH4KLAMUAFNQKpmgsTZRdPRQTC7v9A3k6B/MD/s+kcszMJhnIOfBdC7PicE8R/oGOXB0gMF8MD+Q8/A7T6m3JybNSCWDTzqRIJ1MBNPJRBASyUTR6VRBWyqRIJmA5NC3GYmEkUoE30kzkonwEy5LDi0Plw31P7U83J4F04mwzoSd+piBWbDMgETCCPIs+B7qc7IvRiJxqr9Z0G/oOxGOuoa2Y5xaPrSdoXYr2E6xWoZGcMF08J+1RnUl2ngP/OJLwX09AL07g3koewhUOgDagZ0F813AZRWuQcokmbDwX/albcfdyTsM5vPk8s5gzoPvvDOYz4+czzu5XDCfcyefd/Lu5PMMm88VtJ0YzHN8IHdyX0GfsP/JdSDvjo/yPTQ9de+lr6yhGBgKpaEGK2yjMDiGlg8F2akNDbWdWt+Gbf9k15PBdGpbBbs+uV87vZaCjZxe2+lBR5HlheueKttGto23HLjj4DeZnR/lJs8qC4Bi/1QY9v8fM1sBrABYsGDBhHd07qx6/ssHzp3w+iJnKl8QPCdDJp8n56eW5fLBJ18wXRhIubzjhSHDqSA8FTxBe9BWGEQAwwPr5LrDtuMj1g0C7PT2YBmc2v7QsmBPhdOE00GDe2Hb8L6EtQxfHsz4yeUjt0/YVrj9wr4Mq+X0bZ3amY9Y3yko/eR+C9dnxG899WMK/3AVe6JOsb7D/rMIv9t69o9cGYLDQWVW6QDoAuYXzHcAuws7uPtKYCUEzwKa6I5SyQQNuhFKRKa6mzqCwz6na+oo+64r/RfySWCxmS0yswxwDbCmwjWIiEwdS74VPNixULo2aC+zio4A3H3QzP4MeJDgMtA73P35StYgIjKlDB3nj8FVQLj7/cD9ld6viMiUddGnInmsuw6Si4jElAJARCSmFAAiIjGlABARiSkFgIhITCkARERiSgEgIhJTCgARkZgyL/YUoynCzLqB16KuY
wJmAaM84anqxOm3gn5vNaum3/omd28br9OUDoDpysw2uHtn1HVUQpx+K+j3VrM4/dYhOgQkIhJTCgARkZhSAJTHyqgLqKA4/VbQ761mcfqtgM4BiIjElkYAIiIxpQCYJGY238weNrPNZva8mX056prKzcySZvaMmf1r1LWUm5k1m9m9ZvZi+N/xe6KuqZzM7M/D/x1vMrO7zKwm6pomk5ndYWb7zGxTQVurma01s63hd0uUNVaCAmDyDAJ/4e5vBS4Hrjezt0VcU7l9GdgcdREV8j3g39z9AuCdVPHvNrN24EtAp7tfSPD2vmuirWrS/Ri48rS2G4B17r4YWBfOVzUFwCRx9z3u/nQ4fZjgD0R7tFWVj5l1AB8Dfhh1LeVmZo3AB4DbAdz9hLsfjLaqsksBtWaWAuqA3RHXM6nc/RGg57TmZcCqcHoVcFVFi4qAAqAMzGwhcDHweLSVlNXNwNeAfNSFVMC5QDfwo/CQ1w/NrD7qosrF3XcB3wV2AHuAXnf/ZbRVVcQcd98DwT/ogNkR11N2CoBJZmYNwM+Ar7j7oajrKQcz+ziwz92firqWCkkBlwC3ufvFwFGq+PBAeOx7GbAIOAeoN7P/FG1VUg4KgElkZmmCP/53uvt9UddTRu8DPmFmrwI/BT5kZv8r2pLKqgvocvehEd29BIFQrT4MvOLu3e4+ANwHvDfimiphr5nNAwi/90VcT9kpACaJmRnBMeLN7v5PUddTTu7+DXfvcPeFBCcHH3L3qv0Xoru/Duw0s/PDpiXACxGWVG47gMvNrC783/USqvikd4E1wPJwejmwOsJaKiIVdQFV5H3AZ4HfmdmzYds33f3+CGuSyfNF4E4zywDbgS9EXE/ZuPvjZnYv8DTB1W3PUGV3yZrZXcAHgVlm1gXcCHwHuMfMriUIwaujq7AydCewiEhM6RCQiEhMKQBERGJKASAiElMKABGRmFIAiIjElAJARCSmFAAiIjGlABARian/D+hfAG5Faoo0AAAAAElFTkSuQmCC\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHmVJREFUeJzt3X2QXXWd5/H39z72U/op6TzQnZggEVREwR7Ah3IdIw5RxzCzYuG4Gl1qU7MwPoyzpehUyYxTM+PUuAO6A9REQeMui7DImjgLg5mAhSgBwlMMhJAQIOkkJB066Tx2p/ve7/5xTie307e7k7597+m+5/OqunXP+Z3fOed7fehPfufR3B0REYmfRNQFiIhINBQAIiIxpQAQEYkpBYCISEwpAEREYkoBICISUwoAEZGYGjcAzOwOM9tnZpuKLPtvZuZmNiucNzP7vpltM7ONZnZJQd/lZrY1/Cyf3J8hIiJn60xGAD8Grjy90czmA1cAOwqalwKLw88K4LawbytwI3AZcClwo5m1lFK4iIiUJjVeB3d/xMwWFll0E/A1YHVB2zLgJx7cXrzezJrNbB7wQWCtu/cAmNlaglC5a6x9z5o1yxcuLLZrEREZzVNPPbXf3dvG6zduABRjZp8Adrn7c2ZWuKgd2Fkw3xW2jdY+poULF7Jhw4aJlCgiEltm9tqZ9DvrADCzOuAvgY8UW1ykzcdoL7b9FQSHj1iwYMHZliciImdoIlcBvRlYBDxnZq8CHcDTZjaX4F/28wv6dgC7x2gfwd1Xununu3e2tY07ghERkQk66wBw99+5+2x3X+juCwn+uF/i7q8Da4DPhVcDXQ70uvse4EHgI2bWEp78/UjYJiIiETmTy0DvAh4DzjezLjO7dozu9wPbgW3AD4DrAMKTv38DPBl+vj10QlhERKJhU/l9AJ2dna6TwCIiZ8fMnnL3zvH66U5gEZGYUgCIiMRUVQbA4b4Bblr7Es/uPBh1KSIiU1ZVBkAu73xv3Vaefu1A1KWIiExZVRkA9dng/rYj/YMRVyIiMnVVZQCkkwlq0gkFgIjIGKoyACAYBRzuUwCIiIymegMgk+Jw30DUZYiITFlVGwC1maQOAYmIjKFqA6Auk+SIDgGJiIyqqgPgkA4BiYiMqooDQCeBRUTGUsUBoHMAIiJjqeoAONo/yFR+2qmISJSqNgDqsynyDsdO5KIuRURkSqraAKjLJAE9DkJEZDRVGwD1meB5QDoRLCJSXNUGgEYAIiJjq9oAGHoiqB4HISJSXNUGwMkRgA4BiYgUNW4AmNkdZrbPzDYVtP2jmb1oZhvN7P+aWXPBsm+Y2TYz22Jmf1DQfmXYts3Mbpj8nzJc3dA5AB0CEhEp6kxGAD8GrjytbS1wobtfBLwEfAPAzN4GXAO8PVznVjNLmlkSuAVYCrwN+HTYt2zqssEIQCeBRUSKGzcA3P0RoOe0tl+6+9Bf1vVARzi9DPipu/e7+yvANuDS8LPN3be7+wngp2Hfshm6CkiHgEREipuMcwD/GXggnG4HdhYs6wrbRmsvm2TCSCeNI/06CSwiUkxJAWBmfwkMAncONRXp5mO0F9vmCjPbYGYburu7SymPbErPAxIRGc2EA8DMlgMfBz7jpx640wXML+jWAeweo30Ed1/p7p3u3tnW1jbR8gDIpBIc0iEgEZGiJhQAZnYl8HXgE+5+rGDRGuAaM8ua2SJgMfAE8CSw2MwWmVmG4ETxmtJKH186aToHICIyitR4HczsLuCDwCwz6wJuJLjqJwusNTOA9e7+p+7+vJndA7xAcGjoenfPhdv5M+BBIAnc4e7Pl+H3DJNJJjh0XOcARESKGTcA3P3TRZpvH6P/3wJ/W6T9fuD+s6quRNl0kl4FgIhIUVV7JzBATSqhABARGUVVB0A2HbwXWC+FEREZqboDIJVgIOf0DeSjLkVEZMqp6gCoSQWPg9BhIBGRkao6ALLp4OcpAERERqruAEgpAERERlPVAVCT1iEgEZHRVHUAaAQgIjK6qg4AjQBEREZX1QGQ0QhARGRUVR0ACTNq0noekIhIMVUdABC8E0ABICIyUgwCQM8DEhEpRgEgIhJTVR8AmVSCg8cUACIip6v6AKjROwFERIqq+gDIphIc6lMAiIicrvoDIJ2kfzBP30Au6lJERKaUqg+AmvBmMF0KKiIyXNUHQG34OIgDOhEsIjLMuAFgZneY2T4z21TQ1mpma81sa/jdErabmX3fzLaZ2UYzu6RgneVh/61mtrw8P2ekoecB9Rw9UaldiohMC2cyAvgxcOVpbTcA69x9MbAunAdYCiwOPyuA2yAIDOBG4DLgUuDGodAot9rM0AhAASAiUmjcAHD3R4Ce05qXAavC6VXAVQXtP/HAeqDZzOYBfwCsdfcedz8ArGVkqJRFrUYAIiJFTfQcwBx33wMQfs8O29uBnQX9usK20drLbugQ0AEFgIjIMJN9EtiKtPkY7SM3YLbCzDaY2Ybu7u6
SC0omjGwqQY8OAYmIDDPRANgbHtoh/N4XtncB8wv6dQC7x2gfwd1Xununu3e2tbVNsLzh6jJJjQBERE4z0QBYAwxdybMcWF3Q/rnwaqDLgd7wENGDwEfMrCU8+fuRsK0iatJJenQZqIjIMKnxOpjZXcAHgVlm1kVwNc93gHvM7FpgB3B12P1+4KPANuAY8AUAd+8xs78Bngz7fdvdTz+xXDbZVIKeo/2V2p2IyLQwbgC4+6dHWbSkSF8Hrh9lO3cAd5xVdZOkNp3UVUAiIqep+juBIbgX4MBRHQISESkUiwCoSSc5PpDTA+FERArEIgB0M5iIyEjxCICMAkBE5HSxCICTdwPrZjARkZNiEQA6BCQiMlKsAkB3A4uInBKLAMimExgaAYiIFIpFACTMqMsm6T6iABARGRKLAACoy6TYf0SPgxARGRKLADh/3wP8YvBP+ZftH4abLoSN90RdkohI5MZ9FtB0d/6+B7ji5b8j7X1BQ+9O+MWXgumLPhVdYSIiEav6EcD7d9xKOt83vHHgOKz7djQFiYhMEVUfADP69xZf0NtV2UJERKaYqg+Aw9k5xRc0dVS2EBGRKabqA+DRBdcxkKgZ3piuhSXfiqYgEZEpoupPAm+ZvRSA97x6C00n9tFXN4+6pX+tE8AiEntVPwKAIARue9dqzu2/k7vff7/++IuIEJMAAKhJJ0gYdB/WzWAiIhCjADAzGrK6G1hEZEhJAWBmf25mz5vZJjO7y8xqzGyRmT1uZlvN7G4zy4R9s+H8tnD5wsn4AWejNpPUCEBEJDThADCzduBLQKe7XwgkgWuAfwBucvfFwAHg2nCVa4ED7n4ecFPYr6Jq00n2KQBERIDSDwGlgFozSwF1wB7gQ8C94fJVwFXh9LJwnnD5EjOzEvd/VuoyKQWAiEhowgHg7ruA7wI7CP7w9wJPAQfdfTDs1gW0h9PtwM5w3cGw/8yJ7n8iGrIpeo6cYDCXr+RuRUSmpFIOAbUQ/Kt+EXAOUA8sLdLVh1YZY1nhdleY2QYz29Dd3T3R8oqqzybJubNf7wUQESnpENCHgVfcvdvdB4D7gPcCzeEhIYAOYHc43QXMBwiXNwE9p2/U3Ve6e6e7d7a1tZVQ3kgNNUFZe3qPT+p2RUSmo1ICYAdwuZnVhcfylwAvAA8Dnwz7LAdWh9NrwnnC5Q+5+4gRQDnNyKYBeL23b5yeIiLVr5RzAI8TnMx9GvhduK2VwNeBr5rZNoJj/LeHq9wOzAzbvwrcUELdE9KQHRoBKABEREp6FpC73wjceFrzduDSIn37gKtL2V+patIJUglj7yEFgIhIbO4EhuBu4Bk1KY0ARESIWQAA1GdTOgksIkJsA0AjABGR2AVAQzbF3kN95PMVvQBJRGTKiWUADOScnmO6GUxE4i2WAQC6F0BEJH4BUKN7AUREIIYBMCMcAew+qCuBRCTeYhcAdZkkqYTRdeBY1KWIiEQqdgFgZjTVptnZoxGAiMRb7AIAgvMAO3o0AhCReItlADTWpNmpQ0AiEnOxDICm2jSH+wbpPT4QdSkiIpGJZQA0hpeC6kSwiMRZPAOgNngxjE4Ei0icxToANAIQkTiLZQDUpBJkUwm6DmgEICLxFcsAMLPgSiBdCioiMRbLAACYoXsBRCTmYhsATXVpdvQc03sBRCS2SgoAM2s2s3vN7EUz22xm7zGzVjNba2Zbw++WsK+Z2ffNbJuZbTSzSybnJ0xMS22G/sE8e/SCeBGJqVJHAN8D/s3dLwDeCWwGbgDWuftiYF04D7AUWBx+VgC3lbjvkjTXBVcCvdJ9NMoyREQiM+EAMLNG4APA7QDufsLdDwLLgFVht1XAVeH0MuAnHlgPNJvZvAlXXqKW+gwAr+w/ElUJIiKRKmUEcC7QDfzIzJ4xsx+aWT0wx933AITfs8P+7cDOgvW7wrZI1GeSZJIJXtYIQERiqpQASAGXALe5+8XAUU4d7inGirSNOANrZivMbIOZbeju7i6hvLGZGS31aV7ZrwAQkXgqJQC6gC53fzycv5cgEPYOHdoJv/cV9J9fsH4HsPv0jbr7SnfvdPfOtra2EsobX1NNmu3dOgQkIvE04QBw99eBnWZ2fti0BHgBWAMsD9uWA6vD6TXA58KrgS4HeocOFUWluT7DroPH6R/MRVmGiEgkUiWu/0XgTjPLANuBLxCEyj1mdi2wA7g67Hs/8FFgG3As7Buplro0eYedPcc4b/aMqMsREamokgLA3Z8FOossWlKkrwPXl7K/ydZcF1wJ9HL3UQWAiMRObO8EBmgNA2DbPp0HEJH4iXUAZFIJmmrTvPj64ahLERGpuFgHAEBrfYYX9xyKugwRkYqLfQDMrM+wff9RTgzmoy5FRKSiFAANGXJ5Z7seCSEiMRP7AJjVkAVgi84DiEjMxD4AWuoyJEwBICLxE/sASCaM1vqMAkBEYif2AQDBlUAv6EogEYkZBQCwzH7D/zm+Av+rZrjpQth4T9QliYiUXanPApr2zt/3AB/uvZlMoj9o6N0Jv/hSMH3Rp6IrTESkzGI/Anj/jlvJeP/wxoHjsO7b0RQkIlIhsQ+AGf17iy/o7apsISIiFRb7ADicnVN8QVNHZQsREamw2AfAowuuYyBRM7wxXQtLvhVNQSIiFRL7k8BbZi8F4PJXbqF5YB/99fOovfKvdQJYRKpe7EcAEITAynev5s39d/Ivl6zWH38RiQUFQCibSjKrIcNTrx2IuhQRkYpQABSY11TLU68dYDCnR0OLSPVTABQ4p7mWYydyekOYiMRCyQFgZkkze8bM/jWcX2Rmj5vZVjO728wyYXs2nN8WLl9Y6r4n2znNwdVAT77aE3ElIiLlNxkjgC8Dmwvm/wG4yd0XAweAa8P2a4ED7n4ecFPYb0qZUZOmqTbNhld1HkBEql9JAWBmHcDHgB+G8wZ8CLg37LIKuCqcXhbOEy5fEvafUuY21vDEKz24e9SliIiUVakjgJuBrwFDZ01nAgfdfTCc7wLaw+l2YCdAuLw37D+lnNNcQ/eRfl5941jUpYiIlNWEA8DMPg7sc/enCpuLdPUzWFa43RVmtsHMNnR3d0+0vAmb31oHwKNbK79vEZFKKmUE8D7gE2b2KvBTgkM/NwPNZjZ0h3EHsDuc7gLmA4TLm4ARZ1vdfaW7d7p7Z1tbWwnlTUxzbXAe4JGt+yu+bxGRSppwALj7N9y9w90XAtcAD7n7Z4CHgU+G3ZYDq8PpNeE84fKHfAoeaDcz5rfU8tjLbzCg+wFEpIqV4z6ArwNfNbNtBMf4bw/bbwdmhu1fBW4ow74nxYLWOo70D/LczoNRlyIiUjaT8jA4d/8V8KtwejtwaZE+fcDVk7G/cpvfWocZPLJ1P50LW6MuR0SkLHQncBE16SRzG2v41ZZ9UZciIlI2CoBRLJxZz8auXl7v7Yu6FBGRslAAjOLNbfUArN08yisjRUSmOQXAKFrrM7TWpXlw0+tRlyIiUhYKgFGYGYvaGnhs+xv0Hh+IuhwRkUmnABjDm9vqyeWdh17UYSARqT4KgDHMbayhsSbF6md2j99ZRG
SaUQCMwcx4y5wZ/HrrfroP90ddjojIpFIAjOOCuTPIufOL5zQKEJHqogAYx8yGLHMas9z3dFfUpYiITCoFwBl4y5wZbNp9iC16V7CIVBEFwBl469xGUgnjf65/NepSREQmjQLgDNRmkiye3cB9T+/icJ/uCRCR6qAAOEMXdTRz7ESO+57eFXUpIiKTQgFwhuY21TC3sYZVv32VfH7KvcdGROSsKQDOwjvnN7F9/1E9IE5EqoIC4Cy8ZfYMmuvS/PND25iCb7MUETkrCoCzkEgY717Qwu929fJrvTReRKY5BcBZumDeDGbUpLj531/SKEBEpjUFwFlKJRL83sJWnt5xkAef17kAEZm+JhwAZjbfzB42s81m9ryZfTlsbzWztWa2NfxuCdvNzL5vZtvMbKOZXTJZP6LS3j6vkZn1Gf7+gc0M5PJRlyMiMiGljAAGgb9w97cClwPXm9nbgBuAde6+GFgXzgMsBRaHnxXAbSXsO1KJhPHe82by2hvHuHP9a1GXIyIyIRMOAHff4+5Ph9OHgc1AO7AMWBV2WwVcFU4vA37igfVAs5nNm3DlEVs0s54FrXV895cvsfeQXhwvItPPpJwDMLOFwMXA48Acd98DQUgAs8Nu7cDOgtW6wrZpycz4/fPb6BvI8Vdrno+6HBGRs1ZyAJhZA/Az4CvufmisrkXaRlxGY2YrzGyDmW3o7u4utbyyaq7LcOmiVh7Y9DoPPq+Xx4vI9FJSAJhZmuCP/53ufl/YvHfo0E74vS9s7wLmF6zeAYx4y4q7r3T3TnfvbGtrK6W8irhkQQttM7Lc8LON7NOhIBGZRkq5CsiA24HN7v5PBYvWAMvD6eXA6oL2z4VXA10O9A4dKprOkgnjyrfP5Uj/IF+5+1k9J0hEpo1SRgDvAz4LfMjMng0/HwW+A1xhZluBK8J5gPuB7cA24AfAdSXse0pprc/wgcVt/PblN7jl4W1RlyMickZSE13R3R+l+HF9gCVF+jtw/UT3N9W9/ZxGdh08zn9f+xKL5zRw5YXT9gInEYkJ3Qk8ScyMJRfMZl5TDV+5+1k27eqNuiQRkTEpACZRKpngY++YRyaV4HN3PMHL3UeiLklEZFQKgElWn01x1Tvb6R/I8Sc/WM+ON45FXZKISFEKgDJoqc9w1cXtHDo+yKdWPsa2fYejLklEZAQFQJnMasjyRxe3c7hvgP9422M89dqBqEsSERlGAVBGbTOyXP3u+SQM/uQH6/n5M3qhvIhMHQqAMmuqTfPJd3cwqyHLV+5+lhtXb+LEoB4hLSLRUwBUQF0mxR9d3M7FC5pZ9dhr/PGtv2HL6zovICLRUgBUSDJhfGBxGx97xzy27z/Kx//Hr7nl4W16oYyIREYBUGHnzW7gM5ctYOHMev7xwS1cefMj/GrLvvFXFBGZZAqACNRlUnz0HfP4w4vm0XP0BJ//0ZN8/o4n2Nh1MOrSRCRGJvwsICnduW0NLJhZx3M7e1n/yht84p9/w++f38YXlyzmkgUtUZcnIlVOARCxVCLBu9/UwoXtjTzX1cvjr/Tw8K2/5Z0dTXz2PQv5+EXzqEkng84b74F134beLmjqgCXfgos+Fe0PEJFpy4KHdE5NnZ2dvmHDhgmt+9Lew/y/jdPvdQMnBvO8sOcQm3b18sbREzTVpln2rnP4/IwnWPTYN7GB46c6p2vhD7+vEBCRYczsKXfvHK+fzgFMMZlUgnfNb+Yzly3gjy9uZ3Zjlv/9+A4yv/rb4X/8AQaOByMCEZEJ0CGgKcrMmN9ax/zWOvoHc7Q//kbRft7bxe6Dx2lvrq1whSIy3SkApoFsKsnh7Bwa+0e+eH5Xfibv/85DzGuq4bJFrfzeolYuWdDCebMbSCc1wBOR0SkApolHF1zHFS//Hen8qRfPDyRqeHT+f+U/WBu7Dh7n3zfv4+fP7gYgk0xw/twZXNjexNvPaeSt8xo5r62Bprp0VD9BRKYYBcA0sWX2UgDev+NWZvTv5XB2Do8uuI49s5fyLuBd85txdw4eH2DvoT66D/fTfaSfnz+zi7ue2HFyOy11ac6b3cCb24LPm2bW0d5SS0dzHY21KcxGe8uniFQbBcA0smX20pNBUIyZ0VKXoaUuwwVzgzZ353DfIPuP9HPg2AAHjp1gz8E+Xth9iKMncsPWr8skaW+upaOllnOaa5nXVMOshixtM4LPrIbgk0np0JJINah4AJjZlcD3gCTwQ3f/TqVriBMzo7E2TWPtyEM/fQM5eo8PcKhvgMN9gxw+Psjh/gFe2HOI9dt7OD6QK7JFaKxJ0TYjy8yGLE21aZpr0zSFn+a6YF/NdZmTbTNqUtRnUtSkExphiEwhFQ0AM0sCtwBXAF3Ak2a2xt1fqGQdEqhJJ6lJJ5nTWFN0+WAuz7ETufAzyLETOY6G38f6c+w6cJxX9h+lfyDH8YEcA7mx7ylJGNRmktRnUtRnU9RnkzRkg3Coy6ZoyCapy6SoTSfJphLUpJNk0wlqUsF3NjV8/uR3Qf900kgnEiQSChqZRiK6ybPSI4BLgW3uvh3AzH4KLAMUAFNQKpmgsTZRdPRQTC7v9A3k6B/MD/s+kcszMJhnIOfBdC7PicE8R/oGOXB0gMF8MD+Q8/A7T6m3JybNSCWDTzqRIJ1MBNPJRBASyUTR6VRBWyqRIJmA5NC3GYmEkUoE30kzkonwEy5LDi0Plw31P7U83J4F04mwzoSd+piBWbDMgETCCPIs+B7qc7IvRiJxqr9Z0G/oOxGOuoa2Y5xaPrSdoXYr2E6xWoZGcMF08J+1RnUl2ngP/OJLwX09AL07g3koewhUOgDagZ0F813AZRWuQcokmbDwX/albcfdyTsM5vPk8s5gzoPvvDOYz4+czzu5XDCfcyefd/Lu5PMMm88VtJ0YzHN8IHdyX0GfsP/JdSDvjo/yPTQ9de+lr6yhGBgKpaEGK2yjMDiGlg8F2akNDbWdWt+Gbf9k15PBdGpbBbs+uV87vZaCjZxe2+lBR5HlheueKttGto23HLjj4DeZnR/lJs8qC4Bi/1QY9v8fM1sBrABYsGDBhHd07qx6/ssHzp3w+iJnKl8QPCdDJp8n56eW5fLBJ18wXRhIubzjhSHDqSA8FTxBe9BWGEQAwwPr5LrDtuMj1g0C7PT2YBmc2v7QsmBPhdOE00GDe2Hb8L6EtQxfHsz4yeUjt0/YVrj9wr4Mq+X0bZ3amY9Y3yko/eR+C9dnxG899WMK/3AVe6JOsb7D/rMIv9t69o9cGYLDQWVW6QDoAuYXzHcAuws7uPtKYCUEzwKa6I5SyQQNuhFKRKa6mzqCwz6na+oo+64r/RfySWCxmS0yswxwDbCmwjWIiEwdS74VPNixULo2aC+zio4A3H3QzP4MeJDgMtA73P35StYgIjKlDB3nj8FVQLj7/cD9ld6viMiUddGnInmsuw6Si4jElAJARCSmFAAiIjGlABARiSkFgIhITCkARERiSgEgIhJTCgARkZgyL/YUoynCzLqB16KuYwJmAaM84anqxOm3gn5vNaum3/omd28br9OUDoDpysw2uHtn1HVUQ
px+K+j3VrM4/dYhOgQkIhJTCgARkZhSAJTHyqgLqKA4/VbQ761mcfqtgM4BiIjElkYAIiIxpQCYJGY238weNrPNZva8mX056prKzcySZvaMmf1r1LWUm5k1m9m9ZvZi+N/xe6KuqZzM7M/D/x1vMrO7zKwm6pomk5ndYWb7zGxTQVurma01s63hd0uUNVaCAmDyDAJ/4e5vBS4Hrjezt0VcU7l9GdgcdREV8j3g39z9AuCdVPHvNrN24EtAp7tfSPD2vmuirWrS/Ri48rS2G4B17r4YWBfOVzUFwCRx9z3u/nQ4fZjgD0R7tFWVj5l1AB8Dfhh1LeVmZo3AB4DbAdz9hLsfjLaqsksBtWaWAuqA3RHXM6nc/RGg57TmZcCqcHoVcFVFi4qAAqAMzGwhcDHweLSVlNXNwNeAfNSFVMC5QDfwo/CQ1w/NrD7qosrF3XcB3wV2AHuAXnf/ZbRVVcQcd98DwT/ogNkR11N2CoBJZmYNwM+Ar7j7oajrKQcz+ziwz92firqWCkkBlwC3ufvFwFGq+PBAeOx7GbAIOAeoN7P/FG1VUg4KgElkZmmCP/53uvt9UddTRu8DPmFmrwI/BT5kZv8r2pLKqgvocvehEd29BIFQrT4MvOLu3e4+ANwHvDfimiphr5nNAwi/90VcT9kpACaJmRnBMeLN7v5PUddTTu7+DXfvcPeFBCcHH3L3qv0Xoru/Duw0s/PDpiXACxGWVG47gMvNrC783/USqvikd4E1wPJwejmwOsJaKiIVdQFV5H3AZ4HfmdmzYds33f3+CGuSyfNF4E4zywDbgS9EXE/ZuPvjZnYv8DTB1W3PUGV3yZrZXcAHgVlm1gXcCHwHuMfMriUIwaujq7AydCewiEhM6RCQiEhMKQBERGJKASAiElMKABGRmFIAiIjElAJARCSmFAAiIjGlABARian/D+hfAG5Faoo0AAAAAElFTkSuQmCC", "text/plain": [ "
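The cell above describes the outlier-capture iteration only in prose; the following is a minimal Python sketch of that loop, not the notebook's actual code. The names `Tx`, `estimate_time`, and `block_slots` are hypothetical stand-ins for whatever the notebook defines; only `total_weight`, `p`, and `inclusion_interval` come from the prose.

from dataclasses import dataclass

@dataclass
class Tx:
    weight: float  # hypothetical per-transaction weight

def outlier_adjusted_estimate(txs, total_weight, estimate_time, block_slots):
    # txs must be sorted by weight, descending (top-most transaction first).
    # estimate_time(total_weight, captured_slots) is assumed to return the
    # predicted inclusion time given the remaining competing weight and the
    # number of block slots already captured by outliers.
    best = estimate_time(total_weight, 0)
    for captured, tx in enumerate(txs, start=1):
        if captured > block_slots:
            break  # all block slots are captured
        # Conditioning on this outlier capturing a slot removes its weight
        # from the competition (increasing p), while one more slot is taken
        # from the block (increasing the inclusion interval for the rest).
        total_weight -= tx.weight
        candidate = estimate_time(total_weight, captured)
        if candidate >= best:
            break  # the estimator is no longer improving: converged
        best = candidate
    return best

The loop is greedy: each step either strictly improves the prediction or terminates, so it runs at most `block_slots` iterations.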
" ] diff --git a/mining/src/mempool/model/frontier/search_tree.rs b/mining/src/mempool/model/frontier/search_tree.rs index fc18b2118a..edf34c2710 100644 --- a/mining/src/mempool/model/frontier/search_tree.rs +++ b/mining/src/mempool/model/frontier/search_tree.rs @@ -157,8 +157,9 @@ type InnerTree = BPlusTree>; /// is recomputed from subtree weights for each item insertion/removal /// /// Computing the prefix weight is a crucial operation if the tree is used for random sampling and -/// the tree is highly imbalanced in terms of weight variance. See [`Frontier::sample_inplace`] for -/// more details. +/// the tree is highly imbalanced in terms of weight variance. +/// See [`Frontier::sample_inplace()`](crate::mempool::model::frontier::Frontier::sample_inplace) +/// for more details. pub struct SearchTree { tree: InnerTree, } diff --git a/notify/src/address/tracker.rs b/notify/src/address/tracker.rs index f103b163cb..a2b1c64ddb 100644 --- a/notify/src/address/tracker.rs +++ b/notify/src/address/tracker.rs @@ -384,11 +384,11 @@ impl Inner { } } -/// Tracker of a set of [`Address`](kaspa_addresses::Address), indexing and counting registrations +/// Tracker of a set of [`Address`], indexing and counting registrations /// /// #### Implementation design /// -/// Each [`Address`](kaspa_addresses::Address) is stored internally as a [`ScriptPubKey`](kaspa_consensus_core::tx::ScriptPublicKey). +/// Each [`Address`] is stored internally as a [`ScriptPubKey`](kaspa_consensus_core::tx::ScriptPublicKey). /// This prevents inter-network duplication and optimizes UTXOs filtering efficiency. /// /// But consequently the address network prefix gets lost and must be globally provided when querying for addresses by indexes. diff --git a/notify/src/notifier.rs b/notify/src/notifier.rs index 220fd261be..6927ea1a13 100644 --- a/notify/src/notifier.rs +++ b/notify/src/notifier.rs @@ -75,8 +75,8 @@ pub type DynNotify = Arc>; /// /// - a vector of [`DynCollector`] /// - a vector of [`Subscriber`] -/// - a pool of [`Broadcaster`] -/// - a map of [`Listener`] +/// - a pool of `Broadcaster` +/// - a map of `Listener` /// /// Collectors and subscribers form the scaffold. They are provided to the ctor, are immutable and share its /// lifespan. Both do materialize a connection to the notifier _parents_, collectors for incoming notifications diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index d3112f0af0..110b378b70 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -73,7 +73,7 @@ impl TransactionsSpread { /// within transaction Inv messages. /// /// The broadcast itself may happen only during a subsequent call to this function since it is done at most - /// every [`BROADCAST_INTERVAL`] milliseconds or when the queue length is larger than the Inv message + /// every `BROADCAST_INTERVAL` milliseconds or when the queue length is larger than the Inv message /// capacity. /// /// _GO-KASPAD: EnqueueTransactionIDsForPropagation_ diff --git a/rpc/core/src/api/connection.rs b/rpc/core/src/api/connection.rs index 5b4254288d..fba2aa71a0 100644 --- a/rpc/core/src/api/connection.rs +++ b/rpc/core/src/api/connection.rs @@ -1,3 +1,7 @@ +//! +//! Generic connection trait representing a connection to a client (where available). +//! 
+ use std::sync::Arc; pub trait RpcConnection: Send + Sync { diff --git a/rpc/core/src/api/ctl.rs b/rpc/core/src/api/ctl.rs index 49241e7d92..d7705127b9 100644 --- a/rpc/core/src/api/ctl.rs +++ b/rpc/core/src/api/ctl.rs @@ -1,3 +1,7 @@ +//! +//! Client-side RPC helper for handling connection and disconnection events. +//! + use crate::error::RpcResult; use std::sync::{Arc, Mutex}; use workflow_core::channel::Multiplexer; diff --git a/rpc/core/src/api/mod.rs b/rpc/core/src/api/mod.rs index 1373bd6e0b..a75056a841 100644 --- a/rpc/core/src/api/mod.rs +++ b/rpc/core/src/api/mod.rs @@ -1,3 +1,7 @@ +//! +//! API module for the RPC server. Implements core RPC primitives. +//! + pub mod connection; pub mod ctl; pub mod notifications; diff --git a/rpc/core/src/api/notifications.rs b/rpc/core/src/api/notifications.rs index e07a7c4d98..503af0de85 100644 --- a/rpc/core/src/api/notifications.rs +++ b/rpc/core/src/api/notifications.rs @@ -1,3 +1,7 @@ +//! +//! RPC notifications that can be sent to clients. +//! + use crate::model::message::*; use derive_more::Display; use kaspa_notify::{ diff --git a/rpc/core/src/api/ops.rs b/rpc/core/src/api/ops.rs index 822798a1d2..26ca356eb0 100644 --- a/rpc/core/src/api/ops.rs +++ b/rpc/core/src/api/ops.rs @@ -1,3 +1,7 @@ +//! +//! RPC Operations used to identify RPC methods during transport and in various RPC-related macros. +//! + use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_notify::events::EventType; use serde::{Deserialize, Serialize}; diff --git a/rpc/core/src/api/rpc.rs b/rpc/core/src/api/rpc.rs index 85713e5477..cadc9e00cd 100644 --- a/rpc/core/src/api/rpc.rs +++ b/rpc/core/src/api/rpc.rs @@ -1,8 +1,10 @@ -//! The client API +//! +//! The main [`RpcApi`] trait that defines all RPC methods available in the Rusty Kaspa p2p node. //! //! Rpc = External RPC Service -//! All data provided by the RCP server can be trusted by the client -//! No data submitted by the client to the server can be trusted +//! All data provided by the RPC server can be trusted by the client +//! No data submitted by the client to the node can be trusted +//! use crate::api::connection::DynRpcConnection; use crate::{model::*, notify::connection::ChannelConnection, RpcResult}; diff --git a/rpc/core/src/convert/block.rs b/rpc/core/src/convert/block.rs index 8888fe2bb8..8cb0ab01e3 100644 --- a/rpc/core/src/convert/block.rs +++ b/rpc/core/src/convert/block.rs @@ -1,3 +1,5 @@ +//! Conversion of Block related types + use std::sync::Arc; use crate::{RpcBlock, RpcError, RpcRawBlock, RpcResult, RpcTransaction}; diff --git a/rpc/core/src/convert/mod.rs b/rpc/core/src/convert/mod.rs index dee1988d59..bc5c0e64b9 100644 --- a/rpc/core/src/convert/mod.rs +++ b/rpc/core/src/convert/mod.rs @@ -1,3 +1,7 @@ +//! +//! Data conversion utilities and structs for the RPC layer. +//! + pub mod block; pub mod notification; pub mod scope; diff --git a/rpc/core/src/convert/notification.rs b/rpc/core/src/convert/notification.rs index 362dd5ed9c..6251cc1cdf 100644 --- a/rpc/core/src/convert/notification.rs +++ b/rpc/core/src/convert/notification.rs @@ -1,3 +1,5 @@ +//! 
Conversion of Notification related types + use crate::{ convert::utxo::utxo_set_into_rpc, BlockAddedNotification, FinalityConflictNotification, FinalityConflictResolvedNotification, NewBlockTemplateNotification, Notification, PruningPointUtxoSetOverrideNotification, RpcAcceptedTransactionIds, diff --git a/rpc/core/src/convert/scope.rs b/rpc/core/src/convert/scope.rs index e38f09a1f6..6d94de326f 100644 --- a/rpc/core/src/convert/scope.rs +++ b/rpc/core/src/convert/scope.rs @@ -1,3 +1,5 @@ +//! Conversion of Notification Scope related types + use crate::{ NotifyBlockAddedRequest, NotifyFinalityConflictRequest, NotifyNewBlockTemplateRequest, NotifyPruningPointUtxoSetOverrideRequest, NotifySinkBlueScoreChangedRequest, NotifyUtxosChangedRequest, NotifyVirtualChainChangedRequest, diff --git a/rpc/core/src/convert/tx.rs b/rpc/core/src/convert/tx.rs index 20d41d6741..9b69ca1688 100644 --- a/rpc/core/src/convert/tx.rs +++ b/rpc/core/src/convert/tx.rs @@ -1,3 +1,5 @@ +//! Conversion of Transaction related types + use crate::{RpcError, RpcResult, RpcTransaction, RpcTransactionInput, RpcTransactionOutput}; use kaspa_consensus_core::tx::{Transaction, TransactionInput, TransactionOutput}; diff --git a/rpc/core/src/convert/utxo.rs b/rpc/core/src/convert/utxo.rs index a0376580d7..5fc09f6902 100644 --- a/rpc/core/src/convert/utxo.rs +++ b/rpc/core/src/convert/utxo.rs @@ -1,3 +1,5 @@ +//! Conversion functions for UTXO related types. + use crate::RpcUtxoEntry; use crate::RpcUtxosByAddressesEntry; use kaspa_addresses::Prefix; diff --git a/rpc/core/src/error.rs b/rpc/core/src/error.rs index 235ea639e7..0e2bfee225 100644 --- a/rpc/core/src/error.rs +++ b/rpc/core/src/error.rs @@ -1,3 +1,7 @@ +//! +//! [`RpcError`] enum used by RPC primitives. +//! + use kaspa_consensus_core::{subnets::SubnetworkConversionError, tx::TransactionId}; use kaspa_utils::networking::IpAddress; use std::{net::AddrParseError, num::TryFromIntError}; diff --git a/rpc/core/src/lib.rs b/rpc/core/src/lib.rs index 66e4ece3a8..a2ece77d4d 100644 --- a/rpc/core/src/lib.rs +++ b/rpc/core/src/lib.rs @@ -1,3 +1,16 @@ +//! # RPC Core +//! +//! This crate provides foundational primitives used in the Rusty Kaspa node RPC subsystem. +//! These include the main [`RpcApi`](api::rpc::RpcApi) trait, the [`RpcApiOps`](crate::api::ops::RpcApiOps) +//! enum used in RPC method dispatching, and various data structures used in RPC method arguments. +//! +//! This crate acts as a foundation for the [`kaspa_grpc_client`](https://docs.rs/kaspa_grpc_client) and +//! [`kaspa_wrpc_client`](https://docs.rs/kaspa_wrpc_client) crates, which provide gRPC and WebSocket +//! RPC client implementations. This crate is also used by WASM bindings to provide the [WASM RpcClient +//! implementation](https://docs.rs/kaspa-wrpc-client/latest/kaspa_wrpc_client/wasm/struct.RpcClient.html) +//! (based on wRPC). +//! + // This attribute is required by BorshSerialize/Deserialize #![recursion_limit = "256"] @@ -9,6 +22,7 @@ pub mod notify; pub mod wasm; pub mod prelude { + //! Re-exports of the most commonly used types and traits in this crate. pub use super::api::notifications::*; pub use super::model::script_class::*; pub use super::model::*; diff --git a/rpc/core/src/model/mod.rs b/rpc/core/src/model/mod.rs index beef032572..a7c2556249 100644 --- a/rpc/core/src/model/mod.rs +++ b/rpc/core/src/model/mod.rs @@ -1,3 +1,6 @@ +//! This module contains RPC-specific data structures +//! used in RPC methods.
+ pub mod address; pub mod block; pub mod blue_work; diff --git a/rpc/core/src/notify/mod.rs b/rpc/core/src/notify/mod.rs index 088483e8fa..e6dc1be062 100644 --- a/rpc/core/src/notify/mod.rs +++ b/rpc/core/src/notify/mod.rs @@ -1,3 +1,7 @@ +//! +//! Notification structures used by the RPC subsystem. +//! + pub mod channel; pub mod collector; pub mod connection; diff --git a/rpc/core/src/wasm/convert.rs b/rpc/core/src/wasm/convert.rs index 7bc68171b3..319f74bf0c 100644 --- a/rpc/core/src/wasm/convert.rs +++ b/rpc/core/src/wasm/convert.rs @@ -1,3 +1,7 @@ +//! +//! WASM-specific conversion functions +//! + use crate::model::*; use kaspa_consensus_client::*; use std::sync::Arc; diff --git a/rpc/core/src/wasm/message.rs b/rpc/core/src/wasm/message.rs index 4e330c3474..85c0857023 100644 --- a/rpc/core/src/wasm/message.rs +++ b/rpc/core/src/wasm/message.rs @@ -1,3 +1,7 @@ +//! +//! WASM interfaces and conversion to and from RPC messages. +//! + #![allow(non_snake_case)] use crate::error::RpcError as Error; use crate::error::RpcResult as Result; diff --git a/rpc/core/src/wasm/mod.rs b/rpc/core/src/wasm/mod.rs index 6552baa42b..e3bcdc024b 100644 --- a/rpc/core/src/wasm/mod.rs +++ b/rpc/core/src/wasm/mod.rs @@ -1,3 +1,5 @@ +//! WASM-related conversions + pub mod convert; cfg_if::cfg_if! { diff --git a/rpc/grpc/client/src/lib.rs b/rpc/grpc/client/src/lib.rs index 74db82c4ec..00dadee232 100644 --- a/rpc/grpc/client/src/lib.rs +++ b/rpc/grpc/client/src/lib.rs @@ -102,7 +102,7 @@ impl GrpcClient { /// `url`: the server to connect to /// /// `subscription_context`: it is advised to provide a clone of the same instance if multiple clients dealing with - /// [`UtxosChangedNotifications`] are connected concurrently in order to optimize the memory footprint. + /// `UtxosChangedNotifications` are connected concurrently in order to optimize the memory footprint. /// /// `reconnect`: features an automatic reconnection to the server, reactivating all subscriptions on success. /// diff --git a/rpc/grpc/core/src/convert/message.rs b/rpc/grpc/core/src/convert/message.rs index c0e75cf036..67ac60650c 100644 --- a/rpc/grpc/core/src/convert/message.rs +++ b/rpc/grpc/core/src/convert/message.rs @@ -3,7 +3,7 @@ //! Response payloads in protowire do always contain an error field and generally a set of //! fields providing the requested data. //! -//! Responses in rpc core are expressed as RpcResult, where Xxx is the called +//! Responses in rpc core are expressed as `RpcResult`, where `Xxx` is the called //! RPC method. //! //! The general conversion convention from protowire to rpc core is to consider the error diff --git a/rpc/wrpc/client/src/client.rs b/rpc/wrpc/client/src/client.rs index 71147fd67b..3ac04fa984 100644 --- a/rpc/wrpc/client/src/client.rs +++ b/rpc/wrpc/client/src/client.rs @@ -1,3 +1,5 @@ +//! Kaspa wRPC client implementation. + use crate::imports::*; use crate::parse::parse_host; use crate::{error::Error, node::NodeDescriptor}; @@ -246,14 +248,17 @@ impl RpcResolver for Inner { const WRPC_CLIENT: &str = "wrpc-client"; -/// [`KaspaRpcClient`] allows connection to the Kaspa wRPC Server via -/// binary Borsh or JSON protocols. +/// # [`KaspaRpcClient`] connects to a Kaspa wRPC endpoint via binary Borsh or JSON protocols.
/// /// RpcClient has two ways to interface with the underlying RPC subsystem: /// [`Interface`] that has a [`notification()`](Interface::notification) /// method to register closures that will be invoked on server-side -/// notifications and the [`RpcClient::call`] method that allows async -/// method invocation server-side. +/// notifications and the [`RpcClient::call`] method that allows server-side +/// async method invocation. +/// +/// The node address can be supplied via a URL or a [`Resolver`] that +/// can be used to resolve a public node address dynamically. [`Resolver`] can also +/// be configured to operate against custom node clusters. /// #[derive(Clone)] pub struct KaspaRpcClient { diff --git a/rpc/wrpc/client/src/error.rs b/rpc/wrpc/client/src/error.rs index 781455ddd8..657027ed0b 100644 --- a/rpc/wrpc/client/src/error.rs +++ b/rpc/wrpc/client/src/error.rs @@ -1,3 +1,5 @@ +//! [`Error`](enum@Error) variants for the wRPC client library. + use thiserror::Error; use wasm_bindgen::JsError; use wasm_bindgen::JsValue; diff --git a/rpc/wrpc/client/src/lib.rs b/rpc/wrpc/client/src/lib.rs index b3f26c425f..ac004eccb4 100644 --- a/rpc/wrpc/client/src/lib.rs +++ b/rpc/wrpc/client/src/lib.rs @@ -1,3 +1,19 @@ +//! +//! # wRPC Client for Rusty Kaspa p2p Node +//! +//! This crate provides a WebSocket RPC client for the Rusty Kaspa p2p node. It is based on the +//! [wRPC](https://docs.rs/workflow-rpc) crate that offers a WebSocket RPC implementation +//! for Rust based on Borsh and Serde JSON serialization. wRPC is a lightweight RPC framework +//! meant to function as an IPC (Inter-Process Communication) mechanism for Rust applications. +//! +//! Rust examples of using the wRPC client can be found in the +//! [examples](https://github.com/kaspanet/rusty-kaspa/tree/master/rpc/wrpc/examples) folder. +//! +//! WASM bindings for the wRPC client can be found in the [`kaspa-wrpc-wasm`](https://docs.rs/kaspa-wrpc-wasm) crate. +//! +//! The main struct managing Kaspa RPC client connections is the [`KaspaRpcClient`]. +//! + pub mod client; pub mod error; mod imports; diff --git a/rpc/wrpc/client/src/node.rs b/rpc/wrpc/client/src/node.rs index ca7e19c879..f775bfd188 100644 --- a/rpc/wrpc/client/src/node.rs +++ b/rpc/wrpc/client/src/node.rs @@ -1,3 +1,5 @@ +//! Node connection endpoint as provided by the [`Resolver`]. + use crate::imports::*; /// diff --git a/rpc/wrpc/client/src/parse.rs b/rpc/wrpc/client/src/parse.rs index 35db2c7686..5a497c507a 100644 --- a/rpc/wrpc/client/src/parse.rs +++ b/rpc/wrpc/client/src/parse.rs @@ -1,3 +1,5 @@ +//! wRPC URL parsing and validation utilities. + use std::fmt::Display; use std::net::{Ipv4Addr, Ipv6Addr}; use std::num::ParseIntError; diff --git a/rpc/wrpc/client/src/prelude.rs b/rpc/wrpc/client/src/prelude.rs index 6a410b7235..a4598e5374 100644 --- a/rpc/wrpc/client/src/prelude.rs +++ b/rpc/wrpc/client/src/prelude.rs @@ -1,3 +1,5 @@ +//! Re-exports of the most commonly used types and traits. + pub use crate::client::{ConnectOptions, ConnectStrategy}; pub use crate::{KaspaRpcClient, Resolver, WrpcEncoding}; pub use kaspa_consensus_core::network::{NetworkId, NetworkType}; diff --git a/rpc/wrpc/client/src/resolver.rs b/rpc/wrpc/client/src/resolver.rs index 8dcb194476..170fe12ddf 100644 --- a/rpc/wrpc/client/src/resolver.rs +++ b/rpc/wrpc/client/src/resolver.rs @@ -1,3 +1,7 @@ +//! +//! Module implementing the [`Resolver`] client for obtaining public Kaspa wRPC endpoints. +//!
+ use std::sync::OnceLock; use crate::error::Error; @@ -78,7 +82,12 @@ impl Inner { } /// -/// Resolver is a client for obtaining public Kaspa wRPC endpoints. +/// # Resolver - a client for obtaining public Kaspa wRPC endpoints. +/// +/// This client operates against the [Kaspa Resolver](https://github.com/aspectron/kaspa-resolver) service, +/// which provides load-balancing and failover capabilities for Kaspa wRPC endpoints. The default +/// configuration allows access to public Kaspa nodes, while a custom configuration can be supplied +/// if you are running your own Kaspa node cluster. /// #[derive(Debug, Clone)] pub struct Resolver { @@ -92,10 +101,15 @@ impl Default for Resolver { } impl Resolver { + /// Create a new [`Resolver`] client with the specified list of resolver URLs and an optional `tls` flag. + /// The `tls` flag can be used to enforce a secure connection to the node. pub fn new(urls: Option>>, tls: bool) -> Self { Self { inner: Arc::new(Inner::new(urls, tls)) } } + /// Obtain the list of URLs configured in the resolver client. (This function + /// returns `None` if the resolver is configured to use public + /// node endpoints.) pub fn urls(&self) -> Option>> { if self.inner.public { None @@ -104,11 +118,12 @@ impl Resolver { } } + /// Obtain the `tls` flag of the resolver client. pub fn tls(&self) -> bool { self.inner.tls } - pub fn tls_as_str(&self) -> &'static str { + fn tls_as_str(&self) -> &'static str { if self.inner.tls { "tls" } else { @@ -140,6 +155,7 @@ impl Resolver { format!("{url}/v{CURRENT_VERSION}/kaspa/{network_id}/{tls}/wrpc/{encoding}") } + // query a single resolver service async fn fetch_node_info(&self, url: &str, encoding: Encoding, network_id: NetworkId) -> Result { let url = self.make_url(url, encoding, network_id); let node = Ok(node) } - pub async fn fetch(&self, encoding: Encoding, network_id: NetworkId) -> Result { + // query multiple resolver services in random order + async fn fetch(&self, encoding: Encoding, network_id: NetworkId) -> Result { let mut urls = self.inner.urls.clone(); urls.shuffle(&mut thread_rng()); @@ -161,10 +178,12 @@ impl Resolver { Err(Error::Custom(format!("Failed to connect: {:?}", errors))) } + /// Obtain a Kaspa p2p [`NodeDescriptor`] from the resolver based on the supplied [`Encoding`] and [`NetworkId`]. pub async fn get_node(&self, encoding: Encoding, network_id: NetworkId) -> Result { self.fetch(encoding, network_id).await } + /// Returns a Kaspa wRPC URL from the resolver based on the supplied [`Encoding`] and [`NetworkId`]. pub async fn get_url(&self, encoding: Encoding, network_id: NetworkId) -> Result { let nodes = self.fetch(encoding, network_id).await?; Ok(nodes.url.clone()) diff --git a/rpc/wrpc/client/src/result.rs b/rpc/wrpc/client/src/result.rs index 32f663388a..8427fd12f8 100644 --- a/rpc/wrpc/client/src/result.rs +++ b/rpc/wrpc/client/src/result.rs @@ -1 +1,3 @@ +//! The [`Result`] type alias bound to the [`Error`](super::error::Error) enum used in this crate. + pub type Result = std::result::Result; diff --git a/rpc/wrpc/wasm/src/client.rs b/rpc/wrpc/wasm/src/client.rs index 81487172fe..5982a24254 100644 --- a/rpc/wrpc/wasm/src/client.rs +++ b/rpc/wrpc/wasm/src/client.rs @@ -1,3 +1,10 @@ +//! +//! # WASM bindings for the [Kaspa p2p Node RPC client](KaspaRpcClient). +//! +//! This module provides a WASM interface for the Kaspa p2p Node RPC client +//! - [`RpcClient`]. +//!
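A minimal sketch of the resolver flow documented in the hunk above: obtain a public wRPC node URL for mainnet. The import paths and the `WrpcEncoding` re-export are assumptions based on this diff, not a verified API reference.

```rust
use kaspa_consensus_core::network::{NetworkId, NetworkType};
use kaspa_wrpc_client::{Resolver, WrpcEncoding};

async fn resolve_public_endpoint() -> Result<String, Box<dyn std::error::Error>> {
    // The default configuration targets the public resolver service.
    let resolver = Resolver::default();
    let network_id = NetworkId::new(NetworkType::Mainnet);
    // Query the resolver cluster (in random order) for an available node URL.
    let url = resolver.get_url(WrpcEncoding::Borsh, network_id).await?;
    Ok(url)
}
```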
+ #![allow(non_snake_case)] use crate::imports::*; @@ -130,7 +137,7 @@ impl TryFrom for NotificationEvent { } } -pub struct Inner { +pub(crate) struct Inner { client: Arc, resolver: Option, notification_task: AtomicBool, diff --git a/rpc/wrpc/wasm/src/lib.rs b/rpc/wrpc/wasm/src/lib.rs index e80b3baac0..61d0de19b1 100644 --- a/rpc/wrpc/wasm/src/lib.rs +++ b/rpc/wrpc/wasm/src/lib.rs @@ -1,3 +1,7 @@ +//! +//! WASM bindings for the [Rusty Kaspa p2p Node wRPC Client](kaspa-wrpc-client) +//! + #![allow(unused_imports)] use cfg_if::cfg_if; diff --git a/rpc/wrpc/wasm/src/notify.rs b/rpc/wrpc/wasm/src/notify.rs index 23781e3143..c586f24dc7 100644 --- a/rpc/wrpc/wasm/src/notify.rs +++ b/rpc/wrpc/wasm/src/notify.rs @@ -1,3 +1,7 @@ +//! Notification types and interfaces for wRPC events. + +#![allow(non_snake_case)] + use crate::imports::*; use kaspa_rpc_macros::declare_typescript_wasm_interface as declare; diff --git a/rpc/wrpc/wasm/src/resolver.rs b/rpc/wrpc/wasm/src/resolver.rs index 2ffc7ea56a..7abfdb6884 100644 --- a/rpc/wrpc/wasm/src/resolver.rs +++ b/rpc/wrpc/wasm/src/resolver.rs @@ -1,3 +1,7 @@ +//! [`Resolver`](NativeResolver) bindings for obtaining public Kaspa wRPC URL endpoints. + +#![allow(non_snake_case)] + use crate::client::{RpcClient, RpcConfig}; use crate::imports::*; use js_sys::Array; diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 1d14a3c681..c66656be3c 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -83,7 +83,7 @@ struct Args { ram_scale: f64, /// Logging level for all subsystems {off, error, warn, info, debug, trace} - /// -- You may also specify <subsystem>=<level>,<subsystem>=<level>,... to set the log level for individual subsystems + /// -- You may also specify `<subsystem>=<level>,<subsystem>=<level>,...` to set the log level for individual subsystems #[arg(long = "loglevel", default_value = format!("info,{}=trace", env!("CARGO_PKG_NAME")))] log_level: String, diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 4e57548e72..3d1bb54384 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -1,3 +1,9 @@ +//! +//! # Kaspa Utilities +//! +//! General purpose utilities and various type extensions used across the Rusty Kaspa codebase. +//! + pub mod any; pub mod arc; pub mod binary_heap; diff --git a/utils/src/option.rs b/utils/src/option.rs index ff4779dc12..3e619f46fa 100644 --- a/utils/src/option.rs +++ b/utils/src/option.rs @@ -1,5 +1,5 @@ pub trait OptionExtensions { - /// Substitute for unstable [Option::is_non_or] + /// Substitute for unstable [`Option::is_none_or`] fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool; } diff --git a/wallet/bip32/src/address_type.rs b/wallet/bip32/src/address_type.rs index 63ea00361d..3aecfdb754 100644 --- a/wallet/bip32/src/address_type.rs +++ b/wallet/bip32/src/address_type.rs @@ -1,5 +1,10 @@ +//! +//! Address type (`Receive` or `Change`) used in HD wallet address derivation. +//! + use std::fmt; +/// Address type used in HD wallet address derivation. pub enum AddressType { Receive = 0, Change, diff --git a/wallet/bip32/src/lib.rs b/wallet/bip32/src/lib.rs index a406067f6f..1926728c43 100644 --- a/wallet/bip32/src/lib.rs +++ b/wallet/bip32/src/lib.rs @@ -32,6 +32,8 @@ pub use xkey::ExtendedKey; pub use xprivate_key::ExtendedPrivateKey; pub use xpublic_key::ExtendedPublicKey; +/// Extension for [`secp256k1::SecretKey`] that provides access +/// to [`secp256k1::PublicKey`] and the public key string representation.
pub trait SecretKeyExt { fn get_public_key(&self) -> secp256k1::PublicKey; fn as_str(&self, attrs: ExtendedKeyAttrs, prefix: Prefix) -> Zeroizing; diff --git a/wallet/bip32/src/private_key.rs b/wallet/bip32/src/private_key.rs index 0d4769ee4f..d5dbc3d144 100644 --- a/wallet/bip32/src/private_key.rs +++ b/wallet/bip32/src/private_key.rs @@ -4,6 +4,7 @@ use crate::Result; pub use secp256k1::SecretKey; use secp256k1::{scalar::Scalar, Secp256k1, SignOnly}; +/// Trait for private key types which can be derived using BIP32. pub trait PrivateKey: Sized { /// Public key type which corresponds to this private key. type PublicKey: PublicKey; diff --git a/wallet/bip32/src/public_key.rs b/wallet/bip32/src/public_key.rs index 28a121811b..56fb17de51 100644 --- a/wallet/bip32/src/public_key.rs +++ b/wallet/bip32/src/public_key.rs @@ -3,7 +3,7 @@ use ripemd::{Digest, Ripemd160}; use secp256k1::{scalar::Scalar, Secp256k1, VerifyOnly}; use sha2::Sha256; -/// Trait for key types which can be derived using BIP32. +/// Trait for public key types which can be derived using BIP32. pub trait PublicKey: Sized { /// Initialize this key from bytes. fn from_bytes(bytes: PublicKeyBytes) -> Result; diff --git a/wallet/core/src/account/descriptor.rs b/wallet/core/src/account/descriptor.rs index c549b739d2..c3bf97cc1b 100644 --- a/wallet/core/src/account/descriptor.rs +++ b/wallet/core/src/account/descriptor.rs @@ -11,6 +11,11 @@ use kaspa_wallet_macros::declare_typescript_wasm_interface as declare; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; +/// +/// Structure that represents a wallet account. This structure contains +/// properties that are common to all wallet accounts as well as +/// account-specific properties stored in a BTreeMap by each account. +/// /// @category Wallet API #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct AccountDescriptor { diff --git a/wallet/core/src/account/kind.rs b/wallet/core/src/account/kind.rs index 20e863d77c..511c1f4ed2 100644 --- a/wallet/core/src/account/kind.rs +++ b/wallet/core/src/account/kind.rs @@ -8,6 +8,11 @@ use std::hash::Hash; use std::str::FromStr; use workflow_wasm::convert::CastFromJs; +/// +/// Account kind is a string signature that represents an account type. +/// Account kind is used to identify the account type during +/// serialization, deserialization and various API calls. +/// /// @category Wallet SDK #[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, CastFromJs)] #[wasm_bindgen] diff --git a/wallet/core/src/account/pskb.rs b/wallet/core/src/account/pskb.rs index 8fc46088b9..e71d7e4796 100644 --- a/wallet/core/src/account/pskb.rs +++ b/wallet/core/src/account/pskb.rs @@ -1,3 +1,8 @@ +//! +//! Tools for interfacing wallet accounts with PSKBs +//! (Partially Signed Kaspa Transaction Bundles). +//!
+ pub use crate::error::Error; use crate::imports::*; use crate::tx::PaymentOutputs; diff --git a/wallet/core/src/api/message.rs b/wallet/core/src/api/message.rs index 3b96abd1a5..e27cb2b29c 100644 --- a/wallet/core/src/api/message.rs +++ b/wallet/core/src/api/message.rs @@ -118,15 +118,18 @@ pub struct RetainContextRequest { #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] -pub struct RetainContextResponse { - // pub name : String, - // pub data: Option>>, - // pub is_connected: bool, - // pub is_synced: bool, - // pub is_open: bool, - // pub url: Option, - // pub is_wrpc_client: bool, - // pub network_id: Option, +pub struct RetainContextResponse {} + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetContextRequest { + pub name: String, +} + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetContextResponse { + pub data: Option>, } #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] diff --git a/wallet/core/src/api/mod.rs b/wallet/core/src/api/mod.rs index f0963d610b..979ef1c72f 100644 --- a/wallet/core/src/api/mod.rs +++ b/wallet/core/src/api/mod.rs @@ -1,4 +1,6 @@ //! +//! # Wallet API +//! //! Wallet API module that provides a unified interface for all wallet operations. //! diff --git a/wallet/core/src/api/traits.rs b/wallet/core/src/api/traits.rs index 08ebd65f67..357665e77b 100644 --- a/wallet/core/src/api/traits.rs +++ b/wallet/core/src/api/traits.rs @@ -21,13 +21,29 @@ pub trait WalletApi: Send + Sync + AnySync { async fn register_notifications(self: Arc, channel: Receiver) -> Result; async fn unregister_notifications(self: Arc, channel_id: u64) -> Result<()>; + /// Wrapper around [`retain_context_call()`](Self::retain_context_call). async fn retain_context(self: Arc, name: &str, data: Option>) -> Result<()> { self.retain_context_call(RetainContextRequest { name: name.to_string(), data }).await?; Ok(()) } + /// Obtain previously retained context data using the context `name` as a key. + async fn get_context(self: Arc, name: &str) -> Result>> { + Ok(self.get_context_call(GetContextRequest { name: name.to_string() }).await?.data) + } + + /// Allows the user to store string-keyed context data in the wallet subsystem runtime. + /// The context data persists only during the wallet instance runtime. + /// This can be useful if you have a front-end that connects to a + /// persistent wallet instance operating in the backend (such as a browser + /// extension popup connecting to the background page) and you need to store + /// any type of runtime data in the backend (but are limited to using only + /// the wallet interface). async fn retain_context_call(self: Arc, request: RetainContextRequest) -> Result; + /// Obtain context data stored using [`retain_context()`](Self::retain_context). async fn get_context_call(self: Arc, request: GetContextRequest) -> Result; + /// Wrapper around [`get_status_call()`](Self::get_status_call). async fn get_status(self: Arc, name: Option<&str>) -> Result { Ok(self.get_status_call(GetStatusRequest { name: name.map(String::from) }).await?)
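A minimal sketch of the new context API from the caller's side, assuming `WalletApi` is reachable at `kaspa_wallet_core::api::WalletApi` and that the crate's `Result` alias is re-exported as shown (both assumptions based on this diff):

```rust
use std::sync::Arc;
use kaspa_wallet_core::api::WalletApi;
use kaspa_wallet_core::result::Result;

async fn remember_session(wallet: Arc<dyn WalletApi>) -> Result<()> {
    // Retain arbitrary bytes under a string key; the data lives only for
    // the wallet instance runtime (it is not persisted to storage).
    wallet.clone().retain_context("session", Some(b"user-prefs".to_vec())).await?;
    // Later (possibly from another front-end connection) read the data back.
    let data = wallet.get_context("session").await?;
    assert_eq!(data.as_deref(), Some(b"user-prefs".as_slice()));
    Ok(())
}
```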
@@ -42,7 +58,7 @@ pub trait WalletApi: Send + Sync + AnySync { /// - `is_wrpc_client` - whether the wallet is connected to a node via wRPC async fn get_status_call(self: Arc, request: GetStatusRequest) -> Result; - /// Synchronous connect call (blocking, single attempt, requires sync). + /// Synchronous connect call (blocking, single attempt, requires node sync). async fn connect(self: Arc, url: Option, network_id: &NetworkId) -> Result<()> { let retry_on_error = false; let block_async_connect = true; @@ -55,6 +71,7 @@ pub trait WalletApi: Send + Sync + AnySync { /// comprised of the `url` and a `network_id`. async fn connect_call(self: Arc, request: ConnectRequest) -> Result; + /// Request the wallet RPC subsystem to disconnect from the node. async fn disconnect(self: Arc) -> Result<()> { self.disconnect_call(DisconnectRequest {}).await?; Ok(()) @@ -80,6 +97,7 @@ pub trait WalletApi: Send + Sync + AnySync { /// Ping the wallet service. Accepts an optional `u64` value that is returned in the response. async fn ping_call(self: Arc, request: PingRequest) -> Result; + /// Wrapper around [`batch_call()`](Self::batch_call). async fn batch(self: Arc) -> Result<()> { self.batch_call(BatchRequest {}).await?; Ok(()) @@ -94,6 +112,7 @@ pub trait WalletApi: Send + Sync + AnySync { /// async fn batch_call(self: Arc, request: BatchRequest) -> Result; + /// Wrapper around [`flush_call()`](Self::flush_call). async fn flush(self: Arc, wallet_secret: Secret) -> Result<()> { self.flush_call(FlushRequest { wallet_secret }).await?; Ok(()) @@ -268,6 +287,7 @@ pub trait WalletApi: Send + Sync + AnySync { /// around this call. async fn accounts_rename_call(self: Arc, request: AccountsRenameRequest) -> Result; + /// Wrapper around [`accounts_select_call()`](Self::accounts_select_call) async fn accounts_select(self: Arc, account_id: Option) -> Result<()> { self.accounts_select_call(AccountsSelectRequest { account_id }).await?; Ok(()) @@ -404,6 +424,7 @@ pub trait WalletApi: Send + Sync + AnySync { async fn accounts_estimate_call(self: Arc, request: AccountsEstimateRequest) -> Result; /// Get a range of transaction records for a specific account id. + /// Wrapper around [`transactions_data_get_call()`](Self::transactions_data_get_call). async fn transactions_data_get_range( self: Arc, account_id: AccountId, @@ -413,8 +434,8 @@ pub trait WalletApi: Send + Sync + AnySync { self.transactions_data_get_call(TransactionsDataGetRequest::with_range(account_id, network_id, range)).await } + /// Get a range of transaction records for a specific account id. async fn transactions_data_get_call(self: Arc, request: TransactionsDataGetRequest) -> Result; - // async fn transaction_get_call(self: Arc, request: TransactionGetRequest) -> Result; /// Replaces the note of a transaction with a new note. Note is meant /// to explicitly store a user-supplied string. 
The note is treated @@ -439,6 +460,7 @@ pub trait WalletApi: Send + Sync + AnySync { request: TransactionsReplaceMetadataRequest, ) -> Result; + // TODO async fn address_book_enumerate_call( self: Arc, request: AddressBookEnumerateRequest, ) -> Result; diff --git a/wallet/core/src/api/transport.rs b/wallet/core/src/api/transport.rs index 4de2d78248..c9e5f6de63 100644 --- a/wallet/core/src/api/transport.rs +++ b/wallet/core/src/api/transport.rs @@ -71,6 +71,7 @@ impl WalletApi for WalletClient { Disconnect, ChangeNetworkId, RetainContext, + GetContext, Batch, Flush, WalletEnumerate, @@ -118,7 +119,7 @@ pub trait EventHandler: Send + Sync { /// [`WalletServer`] is a server-side transport interface that declares /// API methods that can be invoked via Borsh or Serde messages containing -/// serializations created using the [`Transport`] interface. The [`WalletServer`] +/// serializations created using the [`Codec`] interface. The [`WalletServer`] /// is a counter-part to [`WalletClient`]. pub struct WalletServer { // pub wallet_api: Arc, @@ -147,6 +148,7 @@ impl WalletServer { Disconnect, ChangeNetworkId, RetainContext, + GetContext, Batch, Flush, WalletEnumerate, diff --git a/wallet/core/src/compat/mod.rs b/wallet/core/src/compat/mod.rs index 79c8e11dd9..093b8845cc 100644 --- a/wallet/core/src/compat/mod.rs +++ b/wallet/core/src/compat/mod.rs @@ -1,3 +1,7 @@ +//! +//! Compatibility layer for legacy wallets. +//! + pub mod gen0; pub use gen0::*; pub mod gen1; diff --git a/wallet/core/src/cryptobox.rs b/wallet/core/src/cryptobox.rs index fa9b188f3b..83845eee99 100644 --- a/wallet/core/src/cryptobox.rs +++ b/wallet/core/src/cryptobox.rs @@ -1,3 +1,7 @@ +//! +//! Re-export of the `crypto_box` crate that can be used to encrypt and decrypt messages. +//! + use crate::imports::*; use crypto_box::{ aead::{Aead, AeadCore, OsRng}, @@ -5,8 +9,13 @@ use crypto_box::{ }; pub use crypto_box::{PublicKey, SecretKey}; -// https://docs.rs/crypto_box/0.9.1/crypto_box/ - +/// +/// Primitives for encrypting and decrypting messages using the `crypto_box` crate. +/// This exists primarily for the purposes of the [WASM bindings](crate::wasm::cryptobox::CryptoBox) +/// to allow access to the `crypto_box` encryption functionality from within web wallets. +/// +/// +/// pub struct CryptoBox { public_key: PublicKey, codec: ChaChaBox, diff --git a/wallet/core/src/events.rs b/wallet/core/src/events.rs index 63d7d5bcab..37816d8b20 100644 --- a/wallet/core/src/events.rs +++ b/wallet/core/src/events.rs @@ -245,6 +245,12 @@ impl Events { } } +/// +/// Event kind representing an [`Events`] variant. +/// Used primarily by WASM bindings to identify event types +/// by their string representation. Can be obtained from the +/// event via [`Events::kind()`]. +/// #[derive(Clone, Copy, Debug, Serialize, Eq, PartialEq, Hash)] #[serde(rename_all = "kebab-case")] pub enum EventKind { diff --git a/wallet/core/src/factory.rs b/wallet/core/src/factory.rs index 515093fdc7..178e331e54 100644 --- a/wallet/core/src/factory.rs +++ b/wallet/core/src/factory.rs @@ -6,6 +6,7 @@ use crate::imports::*; use crate::result::Result; use std::sync::OnceLock; +/// Wallet account loading factory. #[async_trait] pub trait Factory { fn name(&self) -> String; @@ -22,6 +23,7 @@ type FactoryMap = AHashMap static EXTERNAL: OnceLock> = OnceLock::new(); static INITIALIZED: AtomicBool = AtomicBool::new(false); +/// Global factory registry accessor.
pub fn factories() -> &'static FactoryMap { static FACTORIES: OnceLock = OnceLock::new(); FACTORIES.get_or_init(|| { @@ -41,6 +43,7 @@ pub fn factories() -> &'static FactoryMap { }) } +/// Registers a new global account factory. pub fn register(kind: AccountKind, factory: Arc) { if INITIALIZED.load(Ordering::Relaxed) { panic!("Factory registrations must occur before the framework initialization"); diff --git a/wallet/core/src/lib.rs b/wallet/core/src/lib.rs index bca89d4e7e..09cc3ca7fc 100644 --- a/wallet/core/src/lib.rs +++ b/wallet/core/src/lib.rs @@ -1,16 +1,21 @@ //! -//! Kaspa Wallet Core - Multi-platform Rust framework for Kaspa Wallet. +//! # Kaspa Wallet Core +//! +//! Multi-platform Rust framework for Kaspa Wallet. //! //! This framework provides a series of APIs and primitives //! to simplify building applications that interface with //! the Kaspa p2p network. //! -//! Included are low-level primitives +//! For key generation and derivation, please see the +//! [`kaspa_wallet_keys`] crate. +//! +//! This crate includes low-level primitives //! such as [`UtxoProcessor`](crate::utxo::UtxoProcessor) //! and [`UtxoContext`](crate::utxo::UtxoContext) that provide //! various levels of automation as well as higher-level //! APIs such as [`Wallet`](crate::wallet::Wallet), -//! [`Account`](crate::account::Account) (managed via +//! [`Account`](crate::account::Account) (managed via the //! [`WalletApi`](crate::api::WalletApi) trait) //! that offer a fully-featured wallet implementation //! backed by a multi-platform data storage layer capable of @@ -28,15 +33,28 @@ //! to satisfy the requested amount exceeds the maximum //! allowed transaction mass. //! +//! Key generation and derivation are available in the +//! [`kaspa_wallet_keys`] crate. +//! //! The framework can operate -//! within native Rust applications as well as within the NodeJS -//! and browser environments via WASM32. +//! within native Rust applications as well as within NodeJS, Bun +//! and browser environments via the WASM32 SDK. //! -//! For JavaScript / TypeScript environments, there are two +//! WASM32 SDK documentation is available at: +//! +//! +//! For NodeJS JavaScript and TypeScript environments, there are two //! available NPM modules: //! - //! - //! +//! NOTE: for security reasons (to mitigate potential upstream vendor +//! attacks) it is always recommended to build the WASM SDK from source or +//! download pre-built redistributables. +//! +//! Latest development builds of the WASM32 SDK can be found at: +//! +//! //! The `kaspa-wasm` module is a pure WASM32 module that includes //! the entire wallet framework, but does not support RPC due to an absence //! of a native WebSocket in NodeJs environment, while @@ -54,36 +72,6 @@ extern crate alloc; extern crate self as kaspa_wallet_core; -// use cfg_if::cfg_if; - -// cfg_if!
{ -// if #[cfg(feature = "wasm32-core")] { -// // pub mod wasm; -// // pub use wasm::*; - -// pub mod account; -// pub mod api; -// pub mod compat; -// pub mod derivation; -// pub mod deterministic; -// pub mod encryption; -// pub mod error; -// pub mod events; -// pub mod factory; -// mod imports; -// pub mod message; -// pub mod prelude; -// pub mod result; -// pub mod rpc; -// pub mod serializer; -// pub mod settings; -// pub mod storage; -// pub mod tx; -// pub mod utils; -// pub mod utxo; -// pub mod wallet; - -// } else if #[cfg(any(feature = "wasm32-sdk", not(target_arch = "wasm32")))] { pub mod account; pub mod api; pub mod compat; @@ -107,9 +95,6 @@ pub mod tx; pub mod utils; pub mod utxo; pub mod wallet; -// } - -// } #[cfg(any(feature = "wasm32-sdk", feature = "wasm32-core"))] pub mod wasm; diff --git a/wallet/core/src/message.rs b/wallet/core/src/message.rs index 160c8f0407..01dc78676b 100644 --- a/wallet/core/src/message.rs +++ b/wallet/core/src/message.rs @@ -5,6 +5,7 @@ use kaspa_hashes::{Hash, PersonalMessageSigningHash}; use secp256k1::{Error, XOnlyPublicKey}; +/// A personal message (text) that can be signed. #[derive(Clone)] pub struct PersonalMessage<'a>(pub &'a str); diff --git a/wallet/core/src/metrics.rs b/wallet/core/src/metrics.rs index 87a3f99131..b0edb1f885 100644 --- a/wallet/core/src/metrics.rs +++ b/wallet/core/src/metrics.rs @@ -1,6 +1,13 @@ +//! +//! Primitives for network metrics. +//! + use crate::imports::*; -// use kaspa_metrics_core::MetricsSnapshot; +/// Metrics posted by the wallet subsystem. +/// See [`UtxoProcessor::start_metrics`] to enable metrics processing. +/// The update carries the mempool size, which can be used to estimate +/// current network congestion. #[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(tag = "type", content = "data")] #[serde(rename_all = "kebab-case")] @@ -8,37 +15,19 @@ pub enum MetricsUpdate { WalletMetrics { #[serde(rename = "mempoolSize")] mempool_size: u64, - #[serde(rename = "nodePeers")] - node_peers: u32, - #[serde(rename = "networkTPS")] - network_tps: f64, }, - // NodeMetrics { - // snapshot : Box - // } } +/// [`MetricsUpdate`] variant identifier. #[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub enum MetricsUpdateKind { WalletMetrics, - // NodeMetrics } impl MetricsUpdate { pub fn kind(&self) -> MetricsUpdateKind { match self { MetricsUpdate::WalletMetrics { .. } => MetricsUpdateKind::WalletMetrics, - // MetricsUpdate::NodeMetrics { .. } => MetricsUpdateKind::NodeMetrics } } } - -// impl MetricsUpdate { -// pub fn wallet_metrics(mempool_size: u64, peers: usize) -> Self { -// MetricsUpdate::WalletMetrics { mempool_size, peers } -// } - -// pub fn node_metrics(snapshot: MetricsSnapshot) -> Self { -// MetricsUpdate::NodeMetrics(Box::new(snapshot)) -// } -// } diff --git a/wallet/core/src/prelude.rs b/wallet/core/src/prelude.rs index 0ca0194343..741ea0b1e9 100644 --- a/wallet/core/src/prelude.rs +++ b/wallet/core/src/prelude.rs @@ -1,6 +1,5 @@ //! -//! A module which is typically glob imported. -//! Contains most commonly used imports. +//! Re-exports of the most commonly used types and traits in this crate. //!
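A minimal sketch of consuming the trimmed-down `MetricsUpdate` from the metrics hunk above. The module path is an assumption based on this diff, and the congestion threshold is an arbitrary illustration, not a protocol constant:

```rust
use kaspa_wallet_core::metrics::MetricsUpdate;

fn congestion_hint(update: &MetricsUpdate) -> &'static str {
    match update {
        // After this patch, `WalletMetrics` carries only the mempool size.
        MetricsUpdate::WalletMetrics { mempool_size } => {
            if *mempool_size > 10_000 { "congested" } else { "normal" }
        }
    }
}
```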
pub use crate::account::descriptor::AccountDescriptor; diff --git a/wallet/core/src/rpc.rs b/wallet/core/src/rpc.rs index 999e09e302..b75f4b5b28 100644 --- a/wallet/core/src/rpc.rs +++ b/wallet/core/src/rpc.rs @@ -6,13 +6,17 @@ use std::sync::Arc; pub use kaspa_rpc_core::api::ctl::RpcCtl; pub use kaspa_rpc_core::api::rpc::RpcApi; -pub type DynRpcApi = dyn RpcApi; -pub type NotificationChannel = kaspa_utils::channel::Channel; pub use kaspa_rpc_core::notify::mode::NotificationMode; pub use kaspa_wrpc_client::client::{ConnectOptions, ConnectStrategy}; pub use kaspa_wrpc_client::Resolver; pub use kaspa_wrpc_client::WrpcEncoding; +/// Type alias for [`dyn RpcApi`](RpcApi). +pub type DynRpcApi = dyn RpcApi; +/// Type alias for a concrete [`Channel`](kaspa_utils::channel::Channel) +/// used for handling RPC [`Notification`](kaspa_rpc_core::Notification) events. +pub type NotificationChannel = kaspa_utils::channel::Channel; + /// RPC adaptor class that holds the [`RpcApi`] /// and [`RpcCtl`] instances. #[derive(Clone)] diff --git a/wallet/core/src/settings.rs b/wallet/core/src/settings.rs index 35fde44866..f861c5a134 100644 --- a/wallet/core/src/settings.rs +++ b/wallet/core/src/settings.rs @@ -13,6 +13,7 @@ use std::path::PathBuf; use workflow_core::enums::Describe; use workflow_store::fs; +/// Wallet settings enumeration. #[derive(Describe, Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)] #[serde(rename_all = "lowercase")] pub enum WalletSettings { @@ -36,6 +37,8 @@ pub trait DefaultSettings: Sized { async fn defaults() -> Vec<(Self, Value)>; } +/// Platform-neutral settings store (stores the settings K:V map +/// in a file or the browser `localStorage`). #[derive(Debug, Clone)] pub struct SettingsStore where @@ -170,10 +173,12 @@ where } } +/// Returns the wallet data storage folder `~/.kaspa`. pub fn application_folder() -> Result { Ok(fs::resolve_path(storage::local::default_storage_folder())?) } +/// If missing, creates the wallet data storage folder `~/.kaspa`. pub async fn ensure_application_folder() -> Result<()> { let path = application_folder()?; log_info!("Creating application folder: `{}`", path.display()); diff --git a/wallet/core/src/tx/payment.rs b/wallet/core/src/tx/payment.rs index e28c75a22f..c164e0d789 100644 --- a/wallet/core/src/tx/payment.rs +++ b/wallet/core/src/tx/payment.rs @@ -30,12 +30,19 @@ export interface IPaymentOutput { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type representing a single payment output (`IPaymentOutput`). + /// @category Wallet SDK #[wasm_bindgen(typescript_type = "IPaymentOutput")] pub type IPaymentOutput; + /// WASM (TypeScript) type representing multiple payment outputs (`IPaymentOutput[]`). + /// @category Wallet SDK #[wasm_bindgen(typescript_type = "IPaymentOutput[]")] pub type IPaymentOutputArray; } +/// A Rust data structure representing a payment destination. +/// A payment destination is used to signal the Generator where to send the funds. +/// The destination can be a change address or a set of [`PaymentOutput`]. #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub enum PaymentDestination { Change, @@ -51,6 +58,9 @@ impl PaymentDestination { } } +/// A Rust data structure representing a single payment +/// output containing a destination address and amount.
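A minimal sketch of the application-folder helpers documented in the settings hunk above, assuming the import paths and the crate's `Result` alias implied by this diff:

```rust
use kaspa_wallet_core::result::Result;
use kaspa_wallet_core::settings::{application_folder, ensure_application_folder};

async fn prepare_storage() -> Result<()> {
    // Creates `~/.kaspa` if it does not exist yet.
    ensure_application_folder().await?;
    println!("wallet data folder: {}", application_folder()?.display());
    Ok(())
}
```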
+/// /// @category Wallet SDK #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, CastFromJs)] #[wasm_bindgen(inspectable)] diff --git a/wallet/core/src/utxo/processor.rs b/wallet/core/src/utxo/processor.rs index b72b9784ad..f6480f333e 100644 --- a/wallet/core/src/utxo/processor.rs +++ b/wallet/core/src/utxo/processor.rs @@ -604,9 +604,7 @@ impl UtxoProcessor { match kind { MetricsUpdateKind::WalletMetrics => { let mempool_size = snapshot.get(&Metric::NetworkMempoolSize) as u64; - let node_peers = snapshot.get(&Metric::NodeActivePeers) as u32; - let network_tps = snapshot.get(&Metric::NetworkTransactionsPerSecond); - let metrics = MetricsUpdate::WalletMetrics { mempool_size, node_peers, network_tps }; + let metrics = MetricsUpdate::WalletMetrics { mempool_size }; self.try_notify(Events::Metrics { network_id: self.network_id()?, metrics })?; } } diff --git a/wallet/core/src/wallet/api.rs b/wallet/core/src/wallet/api.rs index adeb000757..93becef420 100644 --- a/wallet/core/src/wallet/api.rs +++ b/wallet/core/src/wallet/api.rs @@ -1,5 +1,5 @@ //! -//! [`WalletApi`] trait implementation for [`Wallet`]. +//! [`WalletApi`] trait implementation for the [`Wallet`] struct. //! use crate::api::{message::*, traits::WalletApi}; @@ -62,15 +62,17 @@ impl WalletApi for super::Wallet { if let Some(data) = data { self.inner.retained_contexts.lock().unwrap().insert(name, Arc::new(data)); - Ok(RetainContextResponse {}) } else { self.inner.retained_contexts.lock().unwrap().remove(&name); - // let data = self.inner.retained_contexts.lock().unwrap().get(&name).cloned(); Ok(RetainContextResponse {}) } + } - // self.retain_context(retain); + async fn get_context_call(self: Arc, request: GetContextRequest) -> Result { + let GetContextRequest { name } = request; + let data = self.inner.retained_contexts.lock().unwrap().get(&name).map(|data| (**data).clone()); + Ok(GetContextResponse { data }) } // ------------------------------------------------------------------------------------- diff --git a/wallet/core/src/wallet/args.rs b/wallet/core/src/wallet/args.rs index a5fa378bb5..f0168f7406 100644 --- a/wallet/core/src/wallet/args.rs +++ b/wallet/core/src/wallet/args.rs @@ -3,7 +3,6 @@ //! use crate::imports::*; -// use crate::secret::Secret; use crate::storage::interface::CreateArgs; use crate::storage::{Hint, PrvKeyDataId}; use borsh::{BorshDeserialize, BorshSerialize}; diff --git a/wallet/core/src/wallet/maps.rs b/wallet/core/src/wallet/maps.rs index 430f54e568..232e6fbeb4 100644 --- a/wallet/core/src/wallet/maps.rs +++ b/wallet/core/src/wallet/maps.rs @@ -5,6 +5,7 @@ use crate::imports::*; +/// A thread-safe map of [`AccountId`] to [`Account`] instances. #[derive(Default, Clone)] pub struct ActiveAccountMap(Arc>>>); diff --git a/wallet/core/src/wallet/mod.rs b/wallet/core/src/wallet/mod.rs index d5f4dfadd3..d7c9b6c76e 100644 --- a/wallet/core/src/wallet/mod.rs +++ b/wallet/core/src/wallet/mod.rs @@ -1,6 +1,14 @@ //! -//! Kaspa wallet runtime implementation. +//! # Kaspa wallet runtime implementation. //! +//! This module contains a Rust implementation of the Kaspa wallet that +//! can be used in native Rust as well as WASM32 (Browser, NodeJs, Bun) +//! environments. +//! +//! This wallet is not meant to be used directly, but rather through the +//! use of the [`WalletApi`] trait. +//! + pub mod api; pub mod args; pub mod maps; @@ -81,7 +89,8 @@ pub enum WalletBusMessage { Discovery { record: TransactionRecord }, } -pub struct Inner { +/// Internal wallet state. 
+struct Inner { active_accounts: ActiveAccountMap, legacy_accounts: ActiveAccountMap, listener_id: Mutex>, @@ -189,10 +198,6 @@ impl Wallet { self } - pub fn inner(&self) -> &Arc { - &self.inner - } - // // Mutex used to protect concurrent access to accounts // at the wallet api level. This is a global lock that diff --git a/wallet/core/src/wasm/cryptobox.rs b/wallet/core/src/wasm/cryptobox.rs index 118020fe4d..957d4fc35a 100644 --- a/wallet/core/src/wasm/cryptobox.rs +++ b/wallet/core/src/wasm/cryptobox.rs @@ -106,7 +106,7 @@ impl std::ops::Deref for CryptoBoxPublicKey { /// /// CryptoBox allows for encrypting and decrypting messages using the `crypto_box` crate. /// -/// https://docs.rs/crypto_box/0.9.1/crypto_box/ +/// /// /// @category Wallet SDK /// diff --git a/wallet/core/src/wasm/signer.rs b/wallet/core/src/wasm/signer.rs index e00729ef50..157f06d909 100644 --- a/wallet/core/src/wasm/signer.rs +++ b/wallet/core/src/wasm/signer.rs @@ -50,7 +50,7 @@ pub fn js_sign_transaction(tx: &Transaction, signer: &PrivateKeyArrayT, verify_s } } -pub fn sign_transaction<'a>(tx: &'a Transaction, private_keys: &[[u8; 32]], verify_sig: bool) -> Result<&'a Transaction> { +fn sign_transaction<'a>(tx: &'a Transaction, private_keys: &[[u8; 32]], verify_sig: bool) -> Result<&'a Transaction> { let tx = sign(tx, private_keys)?; if verify_sig { let (cctx, utxos) = tx.tx_and_utxos()?; @@ -97,7 +97,7 @@ pub fn sign_script_hash(script_hash: JsValue, privkey: &PrivateKey) -> Result Result> { +fn sign_hash(sig_hash: Hash, privkey: &[u8; 32]) -> Result> { let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice())?; let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, privkey)?; let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); diff --git a/wallet/keys/src/derivation/gen0/mod.rs b/wallet/keys/src/derivation/gen0/mod.rs index d250326268..0de5da286e 100644 --- a/wallet/keys/src/derivation/gen0/mod.rs +++ b/wallet/keys/src/derivation/gen0/mod.rs @@ -1,4 +1,4 @@ -//! Derivation management for legacy account derivation scheme `972` +//! Derivation management for legacy account derivation scheme based on `'972` derivation path (deprecated). mod hd; pub use hd::{PubkeyDerivationManagerV0, WalletDerivationManagerV0}; diff --git a/wallet/keys/src/derivation/gen1/mod.rs b/wallet/keys/src/derivation/gen1/mod.rs index 1822c4d7a2..5ec859b59a 100644 --- a/wallet/keys/src/derivation/gen1/mod.rs +++ b/wallet/keys/src/derivation/gen1/mod.rs @@ -1,3 +1,4 @@ -/// Derivation management for the Kaspa standard derivation scheme `111111'` +//! Derivation management for the Kaspa standard derivation scheme `'111111'` + mod hd; pub use hd::{PubkeyDerivationManager, WalletDerivationManager}; diff --git a/wallet/keys/src/derivation/mod.rs b/wallet/keys/src/derivation/mod.rs index a63201194a..cfa80b4f70 100644 --- a/wallet/keys/src/derivation/mod.rs +++ b/wallet/keys/src/derivation/mod.rs @@ -1,3 +1,7 @@ +//! +//! Derivation utilities used by the integrated Kaspa Wallet API. +//! + pub mod gen0; pub mod gen1; pub mod traits; diff --git a/wallet/keys/src/derivation_path.rs b/wallet/keys/src/derivation_path.rs index df220ee445..a5389ca37e 100644 --- a/wallet/keys/src/derivation_path.rs +++ b/wallet/keys/src/derivation_path.rs @@ -1,7 +1,13 @@ +//! +//! Implementation of the [`DerivationPath`] manager for arbitrary derivation paths. +//! 
+ use crate::imports::*; use workflow_wasm::prelude::*; +/// /// Key derivation path +/// /// @category Wallet SDK #[derive(Clone, CastFromJs)] #[wasm_bindgen] diff --git a/wallet/keys/src/keypair.rs b/wallet/keys/src/keypair.rs index f4b39f3d39..2cc3d57607 100644 --- a/wallet/keys/src/keypair.rs +++ b/wallet/keys/src/keypair.rs @@ -2,6 +2,8 @@ //! [`keypair`](mod@self) module encapsulates [`Keypair`] and [`PrivateKey`]. //! The [`Keypair`] provides access to the secret and public keys. //! +//! # JavaScript Example +//! //! ```javascript //! //! let keypair = Keypair.random(); @@ -56,7 +58,8 @@ impl Keypair { } /// Get the [`Address`] of this Keypair's [`PublicKey`]. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = keypair.toAddress(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddress)] // pub fn to_address(&self, network_type: NetworkType) -> Result
{ @@ -67,7 +70,8 @@ impl Keypair { } /// Get `ECDSA` [`Address`] of this Keypair's [`PublicKey`]. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = keypair.toAddress(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddressECDSA)] pub fn to_address_ecdsa(&self, network: &NetworkTypeT) -> Result
{ diff --git a/wallet/keys/src/lib.rs b/wallet/keys/src/lib.rs index bec8747d05..86984e36ab 100644 --- a/wallet/keys/src/lib.rs +++ b/wallet/keys/src/lib.rs @@ -1,3 +1,10 @@ +//! +//! # Kaspa Wallet Keys +//! +//! This crate provides tools for creating and managing Kaspa wallet keys. +//! This includes extended key generation and derivation. +//! + pub mod derivation; pub mod derivation_path; pub mod error; diff --git a/wallet/keys/src/prelude.rs b/wallet/keys/src/prelude.rs index 1aed7c5353..5d3af82dda 100644 --- a/wallet/keys/src/prelude.rs +++ b/wallet/keys/src/prelude.rs @@ -1,3 +1,7 @@ +//! +//! Re-exports of the most commonly used types and traits in this crate. +//! + pub use crate::derivation_path::*; pub use crate::keypair::*; pub use crate::privatekey::*; diff --git a/wallet/keys/src/privatekey.rs b/wallet/keys/src/privatekey.rs index 84e2d2e3ba..554bdf36e3 100644 --- a/wallet/keys/src/privatekey.rs +++ b/wallet/keys/src/privatekey.rs @@ -68,7 +68,8 @@ impl PrivateKey { } /// Get the [`Address`] of the PublicKey generated from this PrivateKey. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = privateKey.toAddress(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddress)] pub fn to_address(&self, network: &NetworkTypeT) -> Result
{ @@ -80,7 +81,8 @@ impl PrivateKey { } /// Get `ECDSA` [`Address`] of the PublicKey generated from this PrivateKey. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = privateKey.toAddress(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddressECDSA)] pub fn to_address_ecdsa(&self, network: &NetworkTypeT) -> Result
{ diff --git a/wallet/keys/src/privkeygen.rs b/wallet/keys/src/privkeygen.rs index ff2f3bd8fa..474dec8ec1 100644 --- a/wallet/keys/src/privkeygen.rs +++ b/wallet/keys/src/privkeygen.rs @@ -1,3 +1,7 @@ +//! +//! [`PrivateKeyGenerator`] helper for generating private key derivations from an extended private key (XPrv). +//! + use crate::derivation::gen1::WalletDerivationManager; use crate::imports::*; diff --git a/wallet/keys/src/pubkeygen.rs b/wallet/keys/src/pubkeygen.rs index c05ae844fa..a61eeb5ada 100644 --- a/wallet/keys/src/pubkeygen.rs +++ b/wallet/keys/src/pubkeygen.rs @@ -1,3 +1,7 @@ +//! +//! [`PublicKeyGenerator`] helper for generating public key derivations from an extended public key (XPub). +//! + use crate::derivation::gen1::WalletDerivationManager; use crate::derivation::traits::WalletDerivationManagerTrait; use crate::imports::*; diff --git a/wallet/keys/src/publickey.rs b/wallet/keys/src/publickey.rs index d63eca491c..235eb80804 100644 --- a/wallet/keys/src/publickey.rs +++ b/wallet/keys/src/publickey.rs @@ -1,6 +1,6 @@ //! -//! [`keypair`](mod@self) module encapsulates [`Keypair`] and [`PrivateKey`]. -//! The [`Keypair`] provides access to the secret and public keys. +//! [`keypair`](mod@self) module encapsulates [`Keypair`](crate::keypair::Keypair) and [`PrivateKey`]. +//! The [`Keypair`](crate::keypair::Keypair) provides access to the secret and public keys. //! //! ```javascript //! diff --git a/wallet/keys/src/secret.rs b/wallet/keys/src/secret.rs index 99d94f5bed..d934ce4ec8 100644 --- a/wallet/keys/src/secret.rs +++ b/wallet/keys/src/secret.rs @@ -1,10 +1,10 @@ //! -//! Secret container for sensitive data. Performs zeroization on drop. +//! Secret container for sensitive data. Performs data erasure (zeroization) on drop. //! use crate::imports::*; -/// Secret container for sensitive data. Performs memory zeroization on drop. +/// Secret container for sensitive data. Performs memory erasure (zeroization) on drop. #[derive(Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct Secret(Vec); diff --git a/wallet/keys/src/types.rs b/wallet/keys/src/types.rs index c006d28729..11c5678349 100644 --- a/wallet/keys/src/types.rs +++ b/wallet/keys/src/types.rs @@ -1,5 +1,5 @@ //! -//! Type aliases used by the wallet framework. +//! Key-related type aliases used by the wallet framework. //! use std::sync::Arc; diff --git a/wallet/keys/src/xprv.rs b/wallet/keys/src/xprv.rs index 3e120841b7..c19e0b9cc8 100644 --- a/wallet/keys/src/xprv.rs +++ b/wallet/keys/src/xprv.rs @@ -1,3 +1,7 @@ +//! +//! Extended private key ([`XPrv`]). +//! + use kaspa_bip32::{ChainCode, KeyFingerprint}; use crate::imports::*; diff --git a/wallet/keys/src/xpub.rs b/wallet/keys/src/xpub.rs index 551881d8ee..8706f3fc91 100644 --- a/wallet/keys/src/xpub.rs +++ b/wallet/keys/src/xpub.rs @@ -1,3 +1,7 @@ +//! +//! Extended public key ([`XPub`]). +//! + use kaspa_bip32::{ChainCode, KeyFingerprint, Prefix}; use std::{fmt, str::FromStr}; diff --git a/wallet/pskt/src/bundle.rs b/wallet/pskt/src/bundle.rs index 6e8dc83506..6c926c6665 100644 --- a/wallet/pskt/src/bundle.rs +++ b/wallet/pskt/src/bundle.rs @@ -13,6 +13,11 @@ use kaspa_txscript::{extract_script_pub_key_address, pay_to_address_script, pay_ use serde::{Deserialize, Serialize}; use std::ops::Deref; +/// +/// Bundle is a [`PSKT`] bundle - a sequence of PSKT transactions +/// meant for batch processing and transport as a +/// single serialized payload. 
+/// #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Bundle(pub Vec); diff --git a/wallet/pskt/src/convert.rs b/wallet/pskt/src/convert.rs index 18acf94ed9..a3956c3bb5 100644 --- a/wallet/pskt/src/convert.rs +++ b/wallet/pskt/src/convert.rs @@ -1,3 +1,9 @@ +//! +//! Conversion functions for converting between +//! the [`kaspa_consensus_client`], [`kaspa_consensus_core`] +//! and [`kaspa_wallet_pskt`](crate) types. +//! + use crate::error::Error; use crate::input::{Input, InputBuilder}; use crate::output::{Output, OutputBuilder}; diff --git a/wallet/pskt/src/error.rs b/wallet/pskt/src/error.rs index 11303ae4a3..f3fd835701 100644 --- a/wallet/pskt/src/error.rs +++ b/wallet/pskt/src/error.rs @@ -1,3 +1,5 @@ +//! Error types for the PSKT crate. + use kaspa_txscript_errors::TxScriptError; use crate::input::InputBuilderError; diff --git a/wallet/pskt/src/global.rs b/wallet/pskt/src/global.rs index b79798776d..ad98f11d30 100644 --- a/wallet/pskt/src/global.rs +++ b/wallet/pskt/src/global.rs @@ -1,3 +1,5 @@ +//! Global PSKT data. + use crate::pskt::{KeySource, Version}; use crate::utils::combine_if_no_conflicts; use derive_builder::Builder; diff --git a/wallet/pskt/src/input.rs b/wallet/pskt/src/input.rs index c99ae25426..8d01a7d48d 100644 --- a/wallet/pskt/src/input.rs +++ b/wallet/pskt/src/input.rs @@ -1,3 +1,5 @@ +//! PSKT input structure. + use crate::pskt::{KeySource, PartialSigs}; use crate::utils::{combine_if_no_conflicts, Error as CombineMapErr}; use derive_builder::Builder; diff --git a/wallet/pskt/src/output.rs b/wallet/pskt/src/output.rs index e873ce4a66..36b09edaea 100644 --- a/wallet/pskt/src/output.rs +++ b/wallet/pskt/src/output.rs @@ -1,3 +1,5 @@ +//! PSKT output structure. + use crate::pskt::KeySource; use crate::utils::combine_if_no_conflicts; use derive_builder::Builder; diff --git a/wallet/pskt/src/pskt.rs b/wallet/pskt/src/pskt.rs index 245609803d..73f87a628f 100644 --- a/wallet/pskt/src/pskt.rs +++ b/wallet/pskt/src/pskt.rs @@ -1,3 +1,7 @@ +//! +//! Partially Signed Kaspa Transaction (PSKT) +//! + use kaspa_bip32::{secp256k1, DerivationPath, KeyFingerprint}; use serde::{Deserialize, Serialize}; use serde_repr::{Deserialize_repr, Serialize_repr}; @@ -76,6 +80,23 @@ impl Signature { } } +/// +/// A Partially Signed Kaspa Transaction (PSKT) is a standardized format +/// that allows multiple participants to collaborate in creating and signing +/// a Kaspa transaction. PSKT enables the exchange of incomplete transaction +/// data between different wallets or entities, allowing each participant +/// to add their signature or inputs in stages. This facilitates more complex +/// transaction workflows, such as multi-signature setups or hardware wallet +/// interactions, by ensuring that sensitive data remains secure while +/// enabling cooperation across different devices or platforms without +/// exposing private keys. +/// +/// Please note that due to transaction mass limits and the potential of +/// a wallet aggregating large UTXO sets, the PSKT [`Bundle`](crate::bundle::Bundle) primitive +/// is used to represent a collection of PSKTs and should be used for +/// PSKT serialization and transport. PSKT is an internal implementation +/// primitive that represents each transaction in the bundle.
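A self-contained illustration of the bundle-for-transport idea described above, using simplified stand-in types rather than the crate's actual `PSKT`/`Bundle` definitions (the field names are made up for the example):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct DemoPskt {
    inputs: Vec<String>,
    partial_sigs: Vec<String>,
}

// The bundle wraps several partially signed transactions so they travel
// between participants as a single serialized payload.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct DemoBundle(Vec<DemoPskt>);

fn main() -> Result<(), serde_json::Error> {
    let bundle = DemoBundle(vec![
        DemoPskt { inputs: vec!["utxo-1".into()], partial_sigs: vec![] },
        DemoPskt { inputs: vec!["utxo-2".into()], partial_sigs: vec!["sig-a".into()] },
    ]);
    // Each participant can deserialize the payload, add signatures, and pass it on.
    let payload = serde_json::to_string(&bundle)?;
    let decoded: DemoBundle = serde_json::from_str(&payload)?;
    assert_eq!(bundle, decoded);
    Ok(())
}
```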
+/// #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PSKT { diff --git a/wallet/pskt/src/role.rs b/wallet/pskt/src/role.rs index 84f55bb043..2d6daa47df 100644 --- a/wallet/pskt/src/role.rs +++ b/wallet/pskt/src/role.rs @@ -1,3 +1,5 @@ +//! PSKT roles. + /// Initializes the PSKT with 0 inputs and 0 outputs. /// Reference: [BIP-370: Creator](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#creator) pub enum Creator {} diff --git a/wallet/pskt/src/utils.rs b/wallet/pskt/src/utils.rs index cab2119512..357b61bc70 100644 --- a/wallet/pskt/src/utils.rs +++ b/wallet/pskt/src/utils.rs @@ -1,3 +1,5 @@ +//! Utility functions for the PSKT module. + use std::collections::BTreeMap; // todo optimize without cloning diff --git a/wasm/src/lib.rs b/wasm/src/lib.rs index 77c5e16ea8..d8b0f06a95 100644 --- a/wasm/src/lib.rs +++ b/wasm/src/lib.rs @@ -1,5 +1,5 @@ /*! -# `rusty-kaspa WASM32 bindings` +# Rusty Kaspa WASM32 bindings [github](https://github.com/kaspanet/rusty-kaspa/tree/master/wasm) [crates.io](https://crates.io/crates/kaspa-wasm) @@ -13,9 +13,9 @@ codebase within JavaScript environments such as Node.js and Web Browsers. ## Documentation -- [**integrating with Kaspa** guide](https://kaspa.aspectron.org/) -- [**Rustdoc** documentation](https://docs.rs/kaspa-wasm/latest/kaspa-wasm) -- [**JSDoc** documentation](https://kaspa.aspectron.org/jsdoc/) +- [**Integrating with Kaspa** guide](https://kaspa.aspectron.org/) +- [Rust SDK documentation (**Rustdoc**)](https://docs.rs/kaspa-wasm/) +- [TypeScript documentation (**JSDoc**)](https://kaspa.aspectron.org/docs/) Please note that while WASM directly binds JavaScript and Rust resources, their names on JavaScript side are different from their name in Rust as they conform to the 'camelCase' convention in JavaScript and @@ -25,9 +25,10 @@ to the 'snake_case' convention in Rust. The APIs are currently separated into the following groups (this will be expanded in the future): -- **Transaction API** — Bindings for primitives related to transactions. -- **RPC API** — [RPC interface bindings](rpc) for the Kaspa node using WebSocket (wRPC) connections. -- **Wallet API** — API for async core wallet processing tasks. +- **Consensus Client API** — Bindings for primitives related to transactions. +- **RPC API** — [RPC interface bindings](kaspa_wrpc_wasm::client) for the Kaspa node using WebSocket (wRPC) connections. +- **Wallet SDK** — API for async core wallet processing tasks. +- **Wallet API** — A Rust implementation of the fully-featured wallet usable in native Rust, Browser, NodeJs, and Bun environments. ## NPM Modules @@ -43,6 +44,9 @@ of a native WebSocket in NodeJs environment, while the `kaspa` module includes `websocket` package dependency simulating the W3C WebSocket and due to this supports RPC. +NOTE: for security reasons it is always recommended to build the WASM SDK from source or +download pre-built redistributables from releases or development builds. + ## Examples JavaScript examples for using this framework can be found at: @@ -54,8 +58,19 @@ For pre-built browser-compatible WASM32 redistributables of this framework please see the releases section of the Rusty Kaspa repository at . +## Development Builds + +The latest development builds are available from . +Development builds typically contain fixes and improvements that are not yet available in +stable releases. Additional information can be found at +. 
+
+## Using RPC
+
+No special handling is required to use the RPC client
+in **Browser** or **Bun** environments because
+these environments provide native WebSocket support.
+
 **NODEJS:** If you are building from source, to use the WASM RPC client in the NodeJS environment,
 you need to introduce a global W3C WebSocket object before loading the WASM32 library
 (to simulate the browser behavior).
@@ -123,7 +138,7 @@ const rpc = new RpcClient({
 })();
 ```

-For more details, please follow the [**integrating with Kaspa**](https://kaspa.aspectron.org/) guide.
+For more details, please follow the [**Integrating with Kaspa**](https://kaspa.aspectron.org/) guide.

 */

From 180114e6a7839eec287687a3d5debcf0da593057 Mon Sep 17 00:00:00 2001
From: aspect
Date: Sun, 29 Sep 2024 17:14:16 +0300
Subject: [PATCH 08/31] fix wasm rpc method types for methods without mandatory
 arguments (#572)

---
 rpc/wrpc/wasm/src/client.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/rpc/wrpc/wasm/src/client.rs b/rpc/wrpc/wasm/src/client.rs
index 5982a24254..ccd9cb284b 100644
--- a/rpc/wrpc/wasm/src/client.rs
+++ b/rpc/wrpc/wasm/src/client.rs
@@ -976,6 +976,11 @@ build_wrpc_wasm_bindgen_interface!(
         /// Obtains basic information about the synchronization status of the Kaspa node.
         /// Returned information: Syncing status.
         GetSyncStatus,
+        /// Feerate estimates
+        GetFeeEstimate,
+        /// Retrieves the current network configuration.
+        /// Returned information: Current network configuration.
+        GetCurrentNetwork,
     ],
     [
         // functions with `request` argument
@@ -1010,13 +1015,8 @@ build_wrpc_wasm_bindgen_interface!(
         /// score timestamp estimate.
         /// Returned information: DAA score timestamp estimate.
         GetDaaScoreTimestampEstimate,
-        /// Feerate estimates
-        GetFeeEstimate,
         /// Feerate estimates (experimental)
         GetFeeEstimateExperimental,
-        /// Retrieves the current network configuration.
-        /// Returned information: Current network configuration.
-        GetCurrentNetwork,
         /// Retrieves block headers from the Kaspa BlockDAG.
         /// Returned information: List of block headers.
         GetHeaders,

From 3bc2844ee36eb1dd4af8a342ba56a2663b7a5c25 Mon Sep 17 00:00:00 2001
From: aspect
Date: Sun, 29 Sep 2024 18:59:54 +0300
Subject: [PATCH 09/31] cleanup legacy bip39 cfg that interferes with docs.rs
 builds (#573)

---
 wallet/bip32/src/mnemonic/mod.rs    | 1 -
 wallet/bip32/src/mnemonic/phrase.rs | 2 --
 wallet/bip32/src/mnemonic/seed.rs   | 1 -
 3 files changed, 4 deletions(-)

diff --git a/wallet/bip32/src/mnemonic/mod.rs b/wallet/bip32/src/mnemonic/mod.rs
index 611d88b050..89262b7e25 100644
--- a/wallet/bip32/src/mnemonic/mod.rs
+++ b/wallet/bip32/src/mnemonic/mod.rs
@@ -7,7 +7,6 @@
 mod bits;
 mod language;
 mod phrase;
-//#[cfg(feature = "bip39")]
 pub(crate) mod seed;

 pub use self::{language::Language, phrase::Mnemonic, phrase::WordCount};
diff --git a/wallet/bip32/src/mnemonic/phrase.rs b/wallet/bip32/src/mnemonic/phrase.rs
index 95fd921892..eaa7e7096e 100644
--- a/wallet/bip32/src/mnemonic/phrase.rs
+++ b/wallet/bip32/src/mnemonic/phrase.rs
@@ -229,8 +229,6 @@ impl Mnemonic {
     }

     /// Convert this mnemonic phrase into the BIP39 seed value.
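[Editor's note — context for the `to_seed` hunk below, not part of the patch.] BIP39 derives the seed as PBKDF2-HMAC-SHA512 over the mnemonic phrase, with the salt being the literal string `"mnemonic"` followed by the optional passphrase, 2048 iterations, and a 64-byte output; the `format!("mnemonic{password}")` salt visible in the hunk below is exactly this construction. A minimal standalone sketch follows — the `pbkdf2`/`hmac`/`sha2` crate wiring is assumed from the workspace dependencies (`pbkdf2 = "0.12.2"`), not taken from this diff:

```rust
use hmac::Hmac;
use sha2::Sha512;

/// BIP39 seed derivation: PBKDF2-HMAC-SHA512, 2048 rounds, 64-byte seed.
fn bip39_seed(mnemonic: &str, passphrase: &str) -> [u8; 64] {
    // Per BIP39, the salt is "mnemonic" concatenated with the passphrase.
    let salt = format!("mnemonic{passphrase}");
    let mut seed = [0u8; 64];
    pbkdf2::pbkdf2::<Hmac<Sha512>>(mnemonic.as_bytes(), salt.as_bytes(), 2048, &mut seed)
        .expect("HMAC-SHA512 accepts keys of any length");
    seed
}

fn main() {
    // Toy phrase for illustration only; real phrases come from `Mnemonic`.
    let seed = bip39_seed("abandon abandon ability", "");
    println!("seed[0] = {:#04x}", seed[0]);
}
```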
- //#[cfg(feature = "bip39")] - //#[cfg_attr(docsrs, doc(cfg(feature = "bip39")))] pub fn to_seed(&self, password: &str) -> Seed { let salt = Zeroizing::new(format!("mnemonic{password}")); let mut seed = [0u8; Seed::SIZE]; diff --git a/wallet/bip32/src/mnemonic/seed.rs b/wallet/bip32/src/mnemonic/seed.rs index a4c4025c64..7fd57f19a8 100644 --- a/wallet/bip32/src/mnemonic/seed.rs +++ b/wallet/bip32/src/mnemonic/seed.rs @@ -4,7 +4,6 @@ use zeroize::Zeroize; /// BIP39 seeds. // TODO(tarcieri): support for 32-byte seeds -#[cfg_attr(docsrs, doc(cfg(feature = "bip39")))] pub struct Seed(pub(crate) [u8; Seed::SIZE]); impl Seed { From 66959d4a72abc150454da12179c0bef553a8bdd7 Mon Sep 17 00:00:00 2001 From: Maxim <59533214+biryukovmaxim@users.noreply.github.com> Date: Mon, 30 Sep 2024 20:43:35 +0400 Subject: [PATCH 10/31] Bump tonic and prost versions, adapt middlewares (#553) * bump tonic, prost versions update middlewares * use unbounded channel * change log level to trace * use bounded channel * reuse counts bytes body to measure bytes body * remove unneeded clone --- Cargo.lock | 245 ++++++++------------ Cargo.toml | 15 +- protocol/p2p/src/core/connection_handler.rs | 9 +- rpc/grpc/client/src/lib.rs | 7 +- rpc/grpc/server/src/connection_handler.rs | 4 +- utils/tower/Cargo.toml | 6 +- utils/tower/src/middleware.rs | 77 ++---- 7 files changed, 129 insertions(+), 234 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1296a8922d..8e9829fea6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -435,18 +435,17 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.30", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", "itoa", "matchit", "memchr", @@ -455,25 +454,28 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper 0.1.2", - "tower", + "sync_wrapper 1.0.1", + "tower 0.4.13", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" dependencies = [ "async-trait", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", ] @@ -493,12 +495,6 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - [[package]] name = "base64" version = "0.22.1" @@ -2008,12 +2004,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - [[package]] name = "httparse" version = "1.9.4" @@ -2069,6 +2059,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", 
"httparse", + "httpdate", "itoa", "pin-project-lite", "smallvec", @@ -2086,24 +2077,25 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.12", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", - "webpki-roots 0.26.5", + "webpki-roots", ] [[package]] name = "hyper-timeout" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 0.14.30", + "hyper 1.4.1", + "hyper-util", "pin-project-lite", "tokio", - "tokio-io-timeout", + "tower-service", ] [[package]] @@ -2121,7 +2113,7 @@ dependencies = [ "pin-project-lite", "socket2 0.5.7", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -2755,7 +2747,7 @@ dependencies = [ "prost", "rand 0.8.5", "regex", - "rustls 0.23.12", + "rustls", "thiserror", "tokio", "tokio-stream", @@ -2819,7 +2811,7 @@ dependencies = [ "paste", "prost", "rand 0.8.5", - "rustls 0.23.12", + "rustls", "thiserror", "tokio", "tokio-stream", @@ -3337,13 +3329,15 @@ dependencies = [ name = "kaspa-utils-tower" version = "0.15.2" dependencies = [ + "bytes", "cfg-if 1.0.0", "futures", - "hyper 0.14.30", + "http-body 1.0.1", + "http-body-util", "log", "pin-project-lite", "tokio", - "tower", + "tower 0.5.1", "tower-http", ] @@ -3404,7 +3398,7 @@ dependencies = [ "async-channel 2.3.1", "async-std", "async-trait", - "base64 0.22.1", + "base64", "borsh", "cfb-mode", "cfg-if 1.0.0", @@ -3606,7 +3600,7 @@ dependencies = [ "paste", "rand 0.8.5", "regex", - "rustls 0.23.12", + "rustls", "serde", "serde-wasm-bindgen", "serde_json", @@ -3675,7 +3669,7 @@ dependencies = [ "log", "num_cpus", "paste", - "rustls 0.23.12", + "rustls", "serde", "thiserror", "tokio", @@ -4788,9 +4782,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.6" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" dependencies = [ "bytes", "prost-derive", @@ -4798,13 +4792,13 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.6" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", "heck", - "itertools 0.12.1", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -4819,12 +4813,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.6" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.75", @@ -4832,9 +4826,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.6" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" dependencies = [ "prost", ] @@ -4850,7 
+4844,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls", "socket2 0.5.7", "thiserror", "tokio", @@ -4867,7 +4861,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls", "slab", "thiserror", "tinyvec", @@ -5067,7 +5061,7 @@ version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "encoding_rs", "futures-core", @@ -5087,8 +5081,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.12", - "rustls-pemfile 2.1.3", + "rustls", + "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", @@ -5096,13 +5090,13 @@ dependencies = [ "sync_wrapper 1.0.1", "system-configuration", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.5", + "webpki-roots", "windows-registry", ] @@ -5227,48 +5221,28 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ + "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ - "base64 0.22.1", + "base64", "rustls-pki-types", ] @@ -5278,16 +5252,6 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.6" @@ -5341,16 +5305,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "secp256k1" version = "0.29.0" @@ -5486,7 +5440,7 @@ version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "hex", "indexmap 1.9.3", @@ -5985,16 +5939,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = 
"tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.4.0" @@ -6006,23 +5950,13 @@ dependencies = [ "syn 2.0.75", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls", "rustls-pki-types", "tokio", ] @@ -6046,12 +5980,12 @@ checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", - "rustls 0.23.12", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tungstenite", - "webpki-roots 0.26.5", + "webpki-roots", ] [[package]] @@ -6114,41 +6048,43 @@ dependencies = [ [[package]] name = "tonic" -version = "0.10.2" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.7", + "base64", "bytes", "flate2", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.30", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "prost", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "rustls-pemfile", + "socket2 0.5.7", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", - "webpki-roots 0.25.4", + "webpki-roots", ] [[package]] name = "tonic-build" -version = "0.10.2" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +checksum = "fe4ee8877250136bd7e3d2331632810a4df4ea5e004656990d8d66d2f5ee8a67" dependencies = [ "prettyplease", "proc-macro2", @@ -6177,19 +6113,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "tower-layer", + "tower-service", +] + [[package]] name = "tower-http" -version = "0.4.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "bitflags 2.6.0", "bytes", - "futures-core", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "http-range-header", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", "pin-project-lite", "tower-layer", "tower-service", @@ -6213,7 +6157,6 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log", 
"pin-project-lite", "tracing-attributes", "tracing-core", @@ -6264,7 +6207,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "rustls 0.23.12", + "rustls", "rustls-pki-types", "sha1", "thiserror", @@ -6581,12 +6524,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.5" @@ -7156,7 +7093,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d161c4b844eee479f81306f2526266f9608a663e0a679d9fc0572ee15c144e06" dependencies = [ "async-std", - "base64 0.22.1", + "base64", "cfg-if 1.0.0", "chrome-sys", "faster-hex", diff --git a/Cargo.toml b/Cargo.toml index 37acfb1729..8e6da754a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -150,6 +150,7 @@ bincode = { version = "1.3.3", default-features = false } blake2b_simd = "1.0.2" borsh = { version = "1.5.1", features = ["derive", "rc"] } bs58 = { version = "0.5.0", features = ["check"], default-features = false } +bytes = "1.7.1" cc = "1.0.83" cfb-mode = "0.8.2" cfg-if = "1.0.0" @@ -188,6 +189,8 @@ hex-literal = "0.4.1" hexplay = "0.3.0" hmac = { version = "0.12.1", default-features = false } home = "0.5.5" +http-body = "1.0.1" +http-body-util = "0.1.2" igd-next = { version = "0.14.2", features = ["aio_tokio"] } indexmap = "2.1.0" intertrait = "0.2.2" @@ -211,8 +214,7 @@ parking_lot = "0.12.1" paste = "1.0.14" pbkdf2 = "0.12.2" portable-atomic = { version = "1.5.1", features = ["float"] } -prost = "0.12.1" -# prost = "0.13.1" +prost = "0.13.2" rand = "0.8.5" rand_chacha = "0.3.1" rand_core = { version = "0.6.4", features = ["std"] } @@ -251,8 +253,8 @@ thiserror = "1.0.50" tokio = { version = "1.33.0", features = ["sync", "rt-multi-thread"] } tokio-stream = "0.1.14" toml = "0.8.8" -tonic = { version = "0.10.2", features = ["tls-webpki-roots", "gzip", "transport"] } -tonic-build = { version = "0.10.2", features = ["prost"] } +tonic = { version = "0.12.2", features = ["tls-webpki-roots", "gzip", "transport"] } +tonic-build = { version = "0.12.2", features = ["prost"] } triggered = "0.1.2" uuid = { version = "1.5.0", features = ["v4", "fast-rng", "serde"] } wasm-bindgen = { version = "0.2.93", features = ["serde-serialize"] } @@ -262,12 +264,11 @@ web-sys = "0.3.70" xxhash-rust = { version = "0.8.7", features = ["xxh3"] } zeroize = { version = "1.6.0", default-features = false, features = ["alloc"] } pin-project-lite = "0.2.13" -tower-http = { version = "0.4.4", features = [ +tower-http = { version = "0.5.2", features = [ "map-response-body", "map-request-body", ] } -tower = "0.4.7" -hyper = "0.14.27" +tower = "0.5.1" chrono = "0.4.31" indexed_db_futures = "0.5.0" # workflow dependencies that are not a part of core libraries diff --git a/protocol/p2p/src/core/connection_handler.rs b/protocol/p2p/src/core/connection_handler.rs index a8ec431e42..54d387043c 100644 --- a/protocol/p2p/src/core/connection_handler.rs +++ b/protocol/p2p/src/core/connection_handler.rs @@ -9,7 +9,7 @@ use kaspa_core::{debug, info}; use kaspa_utils::networking::NetAddress; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer, ServiceBuilder}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; use std::net::ToSocketAddrs; use std::pin::Pin; @@ -20,7 +20,6 @@ use 
tokio::sync::mpsc::{channel as mpsc_channel, Sender as MpscSender}; use tokio::sync::oneshot::{channel as oneshot_channel, Sender as OneshotSender}; use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; -use tonic::codegen::Body; use tonic::transport::{Error as TonicError, Server as TonicServer}; use tonic::{Request, Response, Status as TonicStatus, Streaming}; @@ -80,7 +79,7 @@ impl ConnectionHandler { // TODO: check whether we should set tcp_keepalive let serve_result = TonicServer::builder() - .layer(measure_request_body_size_layer(bytes_rx, |b| b)) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()).boxed_unsync())) .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(proto_server) .serve_with_shutdown(serve_address.into(), termination_receiver.map(drop)) @@ -110,9 +109,7 @@ impl ConnectionHandler { let channel = ServiceBuilder::new() .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, self.counters.bytes_rx.clone()))) - .layer(measure_request_body_size_layer(self.counters.bytes_tx.clone(), |body| { - body.map_err(|e| tonic::Status::from_error(Box::new(e))).boxed_unsync() - })) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, self.counters.bytes_tx.clone()).boxed_unsync())) .service(channel); let mut client = ProtoP2pClient::new(channel) diff --git a/rpc/grpc/client/src/lib.rs b/rpc/grpc/client/src/lib.rs index 00dadee232..b7e53bb5e1 100644 --- a/rpc/grpc/client/src/lib.rs +++ b/rpc/grpc/client/src/lib.rs @@ -38,7 +38,7 @@ use kaspa_rpc_core::{ use kaspa_utils::{channel::Channel, triggers::DuplexTrigger}; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer, ServiceBuilder}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; use regex::Regex; use std::{ @@ -50,7 +50,6 @@ use std::{ }; use tokio::sync::Mutex; use tonic::codec::CompressionEncoding; -use tonic::codegen::Body; use tonic::Streaming; mod connection_event; @@ -544,9 +543,7 @@ impl Inner { let bytes_tx = &counters.bytes_tx; let channel = ServiceBuilder::new() .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()))) - .layer(measure_request_body_size_layer(bytes_tx.clone(), |body| { - body.map_err(|e| tonic::Status::from_error(Box::new(e))).boxed_unsync() - })) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()).boxed_unsync())) .service(channel); // Build the gRPC client with an interceptor setting the request timeout diff --git a/rpc/grpc/server/src/connection_handler.rs b/rpc/grpc/server/src/connection_handler.rs index d581ea441f..fd13cf9bb0 100644 --- a/rpc/grpc/server/src/connection_handler.rs +++ b/rpc/grpc/server/src/connection_handler.rs @@ -29,7 +29,7 @@ use kaspa_rpc_core::{ use kaspa_utils::networking::NetAddress; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer}, }; use std::fmt::Debug; use std::{ @@ -144,7 +144,7 @@ impl ConnectionHandler { let serve_result = TonicServer::builder() // .http2_keepalive_interval(Some(GRPC_KEEP_ALIVE_PING_INTERVAL)) // .http2_keepalive_timeout(Some(GRPC_KEEP_ALIVE_PING_TIMEOUT)) - .layer(measure_request_body_size_layer(bytes_rx, |b| b)) + 
.layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()).boxed_unsync())) .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(protowire_server) .serve_with_shutdown( diff --git a/utils/tower/Cargo.toml b/utils/tower/Cargo.toml index 2a2f5f7962..010f8843ba 100644 --- a/utils/tower/Cargo.toml +++ b/utils/tower/Cargo.toml @@ -14,9 +14,11 @@ cfg-if.workspace = true log.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] +bytes.workspace = true futures.workspace = true -hyper.workspace = true +http-body.workspace = true +http-body-util.workspace = true pin-project-lite.workspace = true tokio.workspace = true tower-http.workspace = true -tower.workspace = true \ No newline at end of file +tower.workspace = true diff --git a/utils/tower/src/middleware.rs b/utils/tower/src/middleware.rs index 727d8ca47d..8d0fa77c38 100644 --- a/utils/tower/src/middleware.rs +++ b/utils/tower/src/middleware.rs @@ -1,9 +1,6 @@ -use futures::ready; -use hyper::{ - body::{Bytes, HttpBody, SizeHint}, - HeaderMap, -}; -use log::*; +use bytes::Bytes; +use http_body::{Body, Frame, SizeHint}; +use log::trace; use pin_project_lite::pin_project; use std::{ pin::Pin, @@ -11,11 +8,12 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, }, - task::{Context, Poll}, + task::{ready, Context, Poll}, }; + +pub use http_body_util::BodyExt; pub use tower::ServiceBuilder; -pub use tower_http::map_request_body::MapRequestBodyLayer; -pub use tower_http::map_response_body::MapResponseBodyLayer; +pub use tower_http::{map_request_body::MapRequestBodyLayer, map_response_body::MapResponseBodyLayer}; pin_project! { pub struct CountBytesBody { @@ -31,32 +29,29 @@ impl CountBytesBody { } } -impl HttpBody for CountBytesBody +impl Body for CountBytesBody where - B: HttpBody + Default, + B: Body + Default, { type Data = B::Data; type Error = B::Error; - fn poll_data(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { + fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>>> { let this = self.project(); - let counter: Arc = this.counter.clone(); - match ready!(this.inner.poll_data(cx)) { - Some(Ok(chunk)) => { - debug!("[SIZE MW] response body chunk size = {}", chunk.len()); - let _previous = counter.fetch_add(chunk.len(), Ordering::Relaxed); - debug!("[SIZE MW] total count: {}", _previous); + match ready!(this.inner.poll_frame(cx)) { + Some(Ok(frame)) => { + if let Some(chunk) = frame.data_ref() { + trace!("[SIZE MW] body chunk size = {}", chunk.len()); + let _previous = this.counter.fetch_add(chunk.len(), Ordering::Relaxed); + trace!("[SIZE MW] total count: {}", _previous); + } - Poll::Ready(Some(Ok(chunk))) + Poll::Ready(Some(Ok(frame))) } x => Poll::Ready(x), } } - fn poll_trailers(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>> { - self.project().inner.poll_trailers(cx) - } - fn is_end_stream(&self) -> bool { self.inner.is_end_stream() } @@ -68,43 +63,9 @@ where impl Default for CountBytesBody where - B: HttpBody + Default, + B: Body + Default, { fn default() -> Self { Self { inner: Default::default(), counter: Default::default() } } } - -pub fn measure_request_body_size_layer( - bytes_sent_counter: Arc, - f: F, -) -> MapRequestBodyLayer B2 + Clone> -where - B1: HttpBody + Unpin + Send + 'static, - ::Error: Send, - F: Fn(hyper::body::Body) -> B2 + Clone, -{ - MapRequestBodyLayer::new(move |mut body: B1| { - let (mut tx, new_body) = hyper::Body::channel(); - let bytes_sent_counter = 
bytes_sent_counter.clone(); - tokio::spawn(async move { - while let Some(Ok(chunk)) = body.data().await { - debug!("[SIZE MW] request body chunk size = {}", chunk.len()); - let _previous = bytes_sent_counter.fetch_add(chunk.len(), Ordering::Relaxed); - debug!("[SIZE MW] total count: {}", _previous); - if let Err(_err) = tx.send_data(chunk).await { - // error can occurs only if the channel is already closed - debug!("[SIZE MW] error sending data: {}", _err) - } - } - - if let Ok(Some(trailers)) = body.trailers().await { - if let Err(_err) = tx.send_trailers(trailers).await { - // error can occurs only if the channel is already closed - debug!("[SIZE MW] error sending trailers: {}", _err) - } - } - }); - f(new_body) - }) -} From 2b0f3ab57c09d58082560ba81447c52630506b51 Mon Sep 17 00:00:00 2001 From: George Bogodukhov Date: Thu, 3 Oct 2024 16:36:45 +1000 Subject: [PATCH 11/31] Fix README.md layout and add linting section (#488) --- README.md | 88 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 46 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index ada38c55d7..8749c49e90 100644 --- a/README.md +++ b/README.md @@ -12,15 +12,15 @@ The default branch of this repository is `master` and new contributions are cons ## Installation
Building on Linux - + 1. Install general prerequisites ```bash - sudo apt install curl git build-essential libssl-dev pkg-config + sudo apt install curl git build-essential libssl-dev pkg-config ``` 2. Install Protobuf (required for gRPC) - + ```bash sudo apt install protobuf-compiler libprotobuf-dev #Required for gRPC ``` @@ -36,8 +36,8 @@ The default branch of this repository is `master` and new contributions are cons llvm python3-clang ``` 3. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 4. Install wasm-pack ```bash cargo install wasm-pack @@ -45,7 +45,7 @@ The default branch of this repository is `master` and new contributions are cons 4. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 5. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa @@ -55,7 +55,7 @@ The default branch of this repository is `master` and new contributions are cons -
+
Building on Windows @@ -63,18 +63,18 @@ The default branch of this repository is `master` and new contributions are cons 2. Install [Protocol Buffers](https://github.com/protocolbuffers/protobuf/releases/download/v21.10/protoc-21.10-win64.zip) and add the `bin` directory to your `Path` - + 3. Install [LLVM-15.0.6-win64.exe](https://github.com/llvm/llvm-project/releases/download/llvmorg-15.0.6/LLVM-15.0.6-win64.exe) Add the `bin` directory of the LLVM installation (`C:\Program Files\LLVM\bin`) to PATH - + set `LIBCLANG_PATH` environment variable to point to the `bin` directory as well **IMPORTANT:** Due to C++ dependency configuration issues, LLVM `AR` installation on Windows may not function correctly when switching between WASM and native C++ code compilation (native `RocksDB+secp256k1` vs WASM32 builds of `secp256k1`). Unfortunately, manually setting `AR` environment variable also confuses C++ build toolchain (it should not be set for native but should be set for WASM32 targets). Currently, the best way to address this, is as follows: after installing LLVM on Windows, go to the target `bin` installation directory and copy or rename `LLVM_AR.exe` to `AR.exe`. - + 4. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 5. Install wasm-pack ```bash cargo install wasm-pack @@ -82,16 +82,16 @@ The default branch of this repository is `master` and new contributions are cons 6. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 7. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa cd rusty-kaspa ``` -
+
-
+
Building on Mac OS @@ -99,8 +99,8 @@ The default branch of this repository is `master` and new contributions are cons ```bash brew install protobuf ``` - 2. Install llvm. - + 2. Install llvm. + The default XCode installation of `llvm` does not support WASM build targets. To build WASM on MacOS you need to install `llvm` from homebrew (at the time of writing, the llvm version for MacOS is 16.0.1). ```bash @@ -133,8 +133,8 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of source ~/.zshrc ``` 3. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 4. Install wasm-pack ```bash cargo install wasm-pack @@ -142,14 +142,14 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of 4. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 5. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa cd rusty-kaspa ``` -
+
@@ -235,7 +235,7 @@ cargo run --release --bin kaspad -- --testnet ``` **Testnet 11** - + For participation in the 10BPS test network (TN11), see the following detailed [guide](docs/testnet11.md).
@@ -249,7 +249,7 @@ cargo run --release --bin kaspad -- --configfile /path/to/configfile.toml # or cargo run --release --bin kaspad -- -C /path/to/configfile.toml ``` - - The config file should be a list of \ = \ separated by newlines. + - The config file should be a list of \ = \ separated by newlines. - Whitespace around the `=` is fine, `arg=value` and `arg = value` are both parsed correctly. - Values with special characters like `.` or `=` will require quoting the value i.e \ = "\". - Arguments with multiple values should be surrounded with brackets like `addpeer = ["10.0.0.1", "1.2.3.4"]`. @@ -297,17 +297,17 @@ wRPC **Sidenote:** Rusty Kaspa integrates an optional wRPC - subsystem. wRPC is a high-performance, platform-neutral, Rust-centric, WebSocket-framed RPC + subsystem. wRPC is a high-performance, platform-neutral, Rust-centric, WebSocket-framed RPC implementation that can use [Borsh](https://borsh.io/) and JSON protocol encoding. - JSON protocol messaging - is similar to JSON-RPC 1.0, but differs from the specification due to server-side + JSON protocol messaging + is similar to JSON-RPC 1.0, but differs from the specification due to server-side notifications. [Borsh](https://borsh.io/) encoding is meant for inter-process communication. When using [Borsh](https://borsh.io/) - both client and server should be built from the same codebase. + both client and server should be built from the same codebase. - JSON protocol is based on + JSON protocol is based on Kaspa data structures and is data-structure-version agnostic. You can connect to the JSON endpoint using any WebSocket library. Built-in RPC clients for JavaScript and TypeScript capable of running in web browsers and Node.js are available as a part of @@ -316,27 +316,23 @@ wRPC
- -
- - ## Benchmarking & Testing -
+
Simulation framework (Simpa) Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.10.0/env_logger/#filtering-results) by either: -The current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). +The current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). To see the available commands -```bash +```bash cargo run --release --bin simpa -- --help -``` +``` -The following command will run a simulation to produce 1000 blocks with communication delay of 2 seconds and 8 BPS (blocks per second) while attempting to fill each block with up to 200 transactions. +The following command will run a simulation to produce 1000 blocks with communication delay of 2 seconds and 8 BPS (blocks per second) while attempting to fill each block with up to 200 transactions. ```bash cargo run --release --bin simpa -- -t=200 -d=2 -b=8 -n=1000 @@ -347,7 +343,7 @@ cargo run --release --bin simpa -- -t=200 -d=2 -b=8 -n=1000 -
+
Heap Profiling @@ -362,7 +358,7 @@ It will produce `{bin-name}-heap.json` file in the root of the workdir, that can
-
+
Tests @@ -384,12 +380,21 @@ cd rusty-kaspa cargo nextest run --release ``` +
+
+ +Lints + +```bash +cd rusty-kaspa +./check +```
-
+
Benchmarks @@ -400,7 +405,7 @@ cargo bench
-
+
Logging @@ -415,4 +420,3 @@ Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.1 In this command we set the `loglevel` to `INFO`.
- From b37f0305401b16e80fea74e6d3b1a2cbb7ac5c44 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 6 Oct 2024 14:55:24 +0300 Subject: [PATCH 12/31] Bump tonic version (#579) --- Cargo.lock | 13 +++++++------ Cargo.toml | 4 ++-- protocol/p2p/build.rs | 2 +- rpc/grpc/core/build.rs | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8e9829fea6..b5e4bc913f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5963,9 +5963,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -6048,9 +6048,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", @@ -6082,13 +6082,14 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4ee8877250136bd7e3d2331632810a4df4ea5e004656990d8d66d2f5ee8a67" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", "prost-build", + "prost-types", "quote", "syn 2.0.75", ] diff --git a/Cargo.toml b/Cargo.toml index 8e6da754a6..b99276945b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -253,8 +253,8 @@ thiserror = "1.0.50" tokio = { version = "1.33.0", features = ["sync", "rt-multi-thread"] } tokio-stream = "0.1.14" toml = "0.8.8" -tonic = { version = "0.12.2", features = ["tls-webpki-roots", "gzip", "transport"] } -tonic-build = { version = "0.12.2", features = ["prost"] } +tonic = { version = "0.12.3", features = ["tls-webpki-roots", "gzip", "transport"] } +tonic-build = { version = "0.12.3", features = ["prost"] } triggered = "0.1.2" uuid = { version = "1.5.0", features = ["v4", "fast-rng", "serde"] } wasm-bindgen = { version = "0.2.93", features = ["serde-serialize"] } diff --git a/protocol/p2p/build.rs b/protocol/p2p/build.rs index b41fe87f53..b4aea69394 100644 --- a/protocol/p2p/build.rs +++ b/protocol/p2p/build.rs @@ -5,7 +5,7 @@ fn main() { tonic_build::configure() .build_server(true) .build_client(true) - .compile(&proto_files[0..1], dirs) + .compile_protos(&proto_files[0..1], dirs) .unwrap_or_else(|e| panic!("protobuf compilation failed, error: {e}")); // recompile protobufs only if any of the proto files changes. for file in proto_files { diff --git a/rpc/grpc/core/build.rs b/rpc/grpc/core/build.rs index fdf54486dd..b3a0614eae 100644 --- a/rpc/grpc/core/build.rs +++ b/rpc/grpc/core/build.rs @@ -10,7 +10,7 @@ fn main() { // uncomment this line and reflect the change in src/lib.rs //.out_dir("./src") - .compile(&protowire_files[0..1], dirs) + .compile_protos(&protowire_files[0..1], dirs) .unwrap_or_else(|e| panic!("protobuf compile error: {e}")); // recompile protobufs only if any of the proto files changes. 
From 1378e7b6015fcbcc098ec03103f6ce02273adefc Mon Sep 17 00:00:00 2001 From: D-Stacks <78099568+D-Stacks@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:41:32 +0200 Subject: [PATCH 13/31] replace statrs and statest deps & upgrade some deps. (#425) * replace statrs and statest deps. * remove todo in toml.cargo and fmt & lints. * do a run of `cargo audit fix` for some miscellaneous reports. * use maintained alt ks crate. * add cargo.lock. * update * use new command * newline * refresh cargo lock with a few more version updates * fix minor readme glitches --------- Co-authored-by: Michael Sutton --- Cargo.lock | 1167 +++++++++++--------------- Cargo.toml | 3 +- README.md | 5 +- components/addressmanager/Cargo.toml | 3 +- components/addressmanager/src/lib.rs | 17 +- 5 files changed, 488 insertions(+), 707 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5e4bc913f..3449a43dae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11,24 +11,18 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -42,7 +36,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -63,7 +57,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", - "getrandom 0.2.15", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -78,17 +72,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alga" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex 0.2.4", - "num-traits", -] - [[package]] name = "android-tzdata" version = "0.1.1" @@ -170,18 +153,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" - -[[package]] -name = "approx" -version = "0.3.2" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" -dependencies = [ - "num-traits", -] +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arc-swap" @@ -203,9 +177,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -248,14 +222,14 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.0" 
+version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.0", - "futures-lite 2.3.0", + "fastrand", + "futures-lite", "slab", ] @@ -267,61 +241,32 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.4", - "async-lock 3.4.0", + "async-io", + "async-lock", "blocking", - "futures-lite 2.3.0", + "futures-lite", "once_cell", ] -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if 1.0.0", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - [[package]] name = "async-io" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock 3.4.0", + "async-lock", "cfg-if 1.0.0", "concurrent-queue", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "parking", - "polling 3.7.3", - "rustix 0.38.34", + "polling", + "rustix", "slab", "tracing", "windows-sys 0.59.0", ] -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - [[package]] name = "async-lock" version = "3.4.0" @@ -335,20 +280,20 @@ dependencies = [ [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io", + "async-lock", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", + "futures-lite", "gloo-timers", "kv-log-macro", "log", @@ -362,9 +307,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -373,13 +318,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -390,13 +335,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -429,15 +374,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", @@ -455,16 +400,16 @@ dependencies = [ "rustversion", "serde", "sync_wrapper 1.0.1", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -475,24 +420,24 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -556,7 +501,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -597,7 +542,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -609,7 +554,7 @@ dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "piper", ] @@ -633,7 +578,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", "syn_derive", ] @@ -661,9 +606,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bzip2-sys" @@ -716,9 +661,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.13" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", @@ -884,9 +829,9 @@ dependencies = [ 
[[package]] name = "clap" -version = "4.5.16" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" dependencies = [ "clap_builder", "clap_derive", @@ -894,9 +839,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" dependencies = [ "anstream", "anstyle", @@ -906,14 +851,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -962,9 +907,9 @@ dependencies = [ [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" @@ -1005,9 +950,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1030,7 +975,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.16", + "clap 4.5.19", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1126,8 +1071,8 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", + "generic-array", + "rand_core", "typenum", ] @@ -1155,7 +1100,7 @@ dependencies = [ "aead", "chacha20", "cipher", - "generic-array 0.14.7", + "generic-array", "poly1305", "salsa20", "subtle", @@ -1195,7 +1140,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1219,7 +1164,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1230,14 +1175,14 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "dashmap" -version = "6.0.1" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -1262,7 +1207,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1288,33 +1233,33 @@ dependencies = [ [[package]] name = 
"derive_builder" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" +checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" +checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "derive_builder_macro" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" +checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" dependencies = [ "derive_builder_core", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1327,7 +1272,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1390,6 +1335,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "downcast" version = "0.11.0" @@ -1449,7 +1400,7 @@ checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c" dependencies = [ "num-traits", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1526,7 +1477,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1540,18 +1491,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fiat-crypto" @@ -1561,9 +1503,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1579,21 +1521,21 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "fixedstr" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e049f021908beff8f8c430a99f5c136d3be69f1667346e581f446b173bc012" +checksum = "60aba7afd9b1b9e1950c2b7e8bcac3cc44a273c62a02717dedca2d0a1aee694d" dependencies = [ "serde", ] [[package]] name = "flate2" -version = "1.0.32" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666" +checksum = 
"a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -1613,9 +1555,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1628,9 +1570,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1638,15 +1580,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1655,24 +1597,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" - -[[package]] -name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -1680,7 +1607,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand", "futures-core", "futures-io", "parking", @@ -1689,32 +1616,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = 
"f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1728,15 +1655,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f797e67af32588215eaaab8327027ee8e71b9dd0b2b26996aedf20c030fce309" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1748,17 +1666,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.15" @@ -1768,15 +1675,15 @@ dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -1786,9 +1693,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", @@ -1808,7 +1715,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.4.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1827,7 +1734,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.4.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1859,6 +1766,15 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -1868,6 +1784,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "heapless" version = "0.8.0" @@ -2006,9 +1928,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2039,7 +1961,7 @@ dependencies = [ "httpdate", "itoa", 
"pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -2069,9 +1991,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", @@ -2100,9 +2022,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", @@ -2111,18 +2033,17 @@ dependencies = [ "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2170,7 +2091,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.30", "log", - "rand 0.8.5", + "rand", "tokio", "url", "xmltree", @@ -2206,12 +2127,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -2221,7 +2142,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -2259,22 +2180,11 @@ dependencies = [ "uuid 0.8.2", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.9", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" @@ -2384,11 +2294,10 @@ dependencies = [ "local-ip-address", "log", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", + "rv", "serde", - "statest", - "statrs", "thiserror", "tokio", ] @@ -2407,15 +2316,15 @@ dependencies = [ "borsh", "bs58", "faster-hex", - "getrandom 0.2.15", + "getrandom", "hmac", "js-sys", "kaspa-consensus-core", "kaspa-utils", "once_cell", "pbkdf2", - "rand 0.8.5", - "rand_core 0.6.4", + "rand", + "rand_core", "ripemd", "secp256k1", "serde", @@ -2487,7 +2396,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "tokio", ] @@ -2503,7 +2412,7 @@ dependencies = [ "faster-hex", "flate2", "futures-util", - "indexmap 2.4.0", + 
"indexmap 2.6.0", "itertools 0.13.0", "kaspa-consensus-core", "kaspa-consensus-notify", @@ -2522,8 +2431,8 @@ dependencies = [ "log", "once_cell", "parking_lot", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "rocksdb", "secp256k1", @@ -2551,7 +2460,7 @@ dependencies = [ "kaspa-txscript", "kaspa-utils", "kaspa-wasm-core", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2573,7 +2482,7 @@ dependencies = [ "criterion", "faster-hex", "futures-util", - "getrandom 0.2.15", + "getrandom", "itertools 0.13.0", "js-sys", "kaspa-addresses", @@ -2584,7 +2493,7 @@ dependencies = [ "kaspa-muhash", "kaspa-txscript-errors", "kaspa-utils", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2632,7 +2541,7 @@ dependencies = [ "kaspa-hashes", "kaspa-txscript", "kaspa-utils", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2657,7 +2566,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "tokio", ] @@ -2708,14 +2617,14 @@ dependencies = [ "bincode", "enum-primitive-derive", "faster-hex", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-hashes", "kaspa-utils", "num-traits", "num_cpus", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", "serde", "smallvec", @@ -2745,7 +2654,7 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand 0.8.5", + "rand", "regex", "rustls", "thiserror", @@ -2773,7 +2682,7 @@ dependencies = [ "log", "paste", "prost", - "rand 0.8.5", + "rand", "regex", "thiserror", "tokio", @@ -2810,7 +2719,7 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand 0.8.5", + "rand", "rustls", "thiserror", "tokio", @@ -2833,7 +2742,7 @@ dependencies = [ "kaspa-utils", "keccak", "once_cell", - "rand 0.8.5", + "rand", "serde", "sha2", "sha3", @@ -2882,7 +2791,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand 0.8.5", + "rand", "thiserror", "tokio", "triggered", @@ -2899,7 +2808,7 @@ dependencies = [ "kaspa-utils", "malachite-base", "malachite-nz", - "rand_chacha 0.3.1", + "rand_chacha", "serde", "serde-wasm-bindgen", "thiserror", @@ -2950,7 +2859,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "secp256k1", "serde", "smallvec", @@ -2974,8 +2883,8 @@ dependencies = [ "criterion", "kaspa-hashes", "kaspa-math", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rayon", "serde", ] @@ -2991,7 +2900,7 @@ dependencies = [ "derive_more", "futures", "futures-util", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-addresses", "kaspa-alloc", @@ -3005,7 +2914,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand 0.8.5", + "rand", "serde", "thiserror", "tokio", @@ -3023,7 +2932,7 @@ dependencies = [ "async-trait", "chrono", "futures", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-addressmanager", "kaspa-connectionmanager", @@ -3040,7 +2949,7 @@ dependencies = [ "kaspa-utils-tower", "log", "parking_lot", - "rand 0.8.5", + "rand", "thiserror", "tokio", "tokio-stream", @@ -3067,7 +2976,7 @@ dependencies = [ "log", "parking_lot", "prost", - "rand 0.8.5", + "rand", "seqlock", "serde", "thiserror", @@ -3135,7 +3044,7 @@ dependencies = [ "kaspa-utils", "log", "paste", - "rand 0.8.5", + "rand", "serde", "serde-wasm-bindgen", "serde_json", @@ -3197,14 +3106,14 @@ dependencies = [ "async-trait", "bincode", "chrono", - "clap 4.5.16", + "clap 4.5.19", "criterion", "crossbeam-channel", "dhat", "faster-hex", "flate2", "futures-util", - "indexmap 2.4.0", + "indexmap 
2.6.0", "itertools 0.13.0", "kaspa-addresses", "kaspa-alloc", @@ -3235,8 +3144,8 @@ dependencies = [ "kaspad", "log", "parking_lot", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "rocksdb", "secp256k1", @@ -3259,7 +3168,7 @@ dependencies = [ "criterion", "hex", "hexplay", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-addresses", "kaspa-consensus-core", @@ -3269,7 +3178,7 @@ dependencies = [ "kaspa-wasm-core", "log", "parking_lot", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -3311,7 +3220,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "rand 0.8.5", + "rand", "rlimit", "serde", "serde_json", @@ -3356,7 +3265,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", "serde", "thiserror", @@ -3441,7 +3350,7 @@ dependencies = [ "md-5", "pad", "pbkdf2", - "rand 0.8.5", + "rand", "regex", "ripemd", "secp256k1", @@ -3486,7 +3395,7 @@ dependencies = [ "kaspa-txscript-errors", "kaspa-utils", "kaspa-wasm-core", - "rand 0.8.5", + "rand", "ripemd", "secp256k1", "serde", @@ -3598,7 +3507,7 @@ dependencies = [ "kaspa-rpc-core", "kaspa-rpc-macros", "paste", - "rand 0.8.5", + "rand", "regex", "rustls", "serde", @@ -3637,7 +3546,7 @@ name = "kaspa-wrpc-proxy" version = "0.15.2" dependencies = [ "async-trait", - "clap 4.5.16", + "clap 4.5.19", "kaspa-consensus-core", "kaspa-grpc-client", "kaspa-rpc-core", @@ -3725,7 +3634,7 @@ version = "0.15.2" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", - "clap 4.5.16", + "clap 4.5.19", "dhat", "dirs", "futures-util", @@ -3754,7 +3663,7 @@ dependencies = [ "kaspa-wrpc-server", "log", "num_cpus", - "rand 0.8.5", + "rand", "rayon", "serde", "serde_with", @@ -3797,9 +3706,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" @@ -3856,9 +3765,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.19" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "pkg-config", @@ -3885,12 +3794,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "linux-raw-sys" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -3899,14 +3802,14 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-ip-address" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136ef34e18462b17bf39a7826f8f3bbc223341f8e83822beb8b77db9a3d49696" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" dependencies = [ "libc", "neli", "thiserror", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -3953,7 +3856,7 @@ dependencies = [ "log-mdc", "once_cell", "parking_lot", - "rand 0.8.5", + "rand", "serde", "serde-value", "serde_json", @@ -3964,11 +3867,20 @@ dependencies = [ "winapi", ] 
+[[package]] +name = "lru" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e7d46de488603ffdd5f30afbc64fbba2378214a2c3a2fb83abf3d33126df17" +dependencies = [ + "hashbrown 0.13.2", +] + [[package]] name = "lz4-sys" -version = "1.10.0" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ "cc", "libc", @@ -4013,7 +3925,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -4024,7 +3936,7 @@ checksum = "13198c120864097a565ccb3ff947672d969932b7975ebd4085732c9f09435e55" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -4037,14 +3949,14 @@ dependencies = [ "macroific_core", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "malachite-base" -version = "0.4.15" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f8d7930df6fcb9c86761ca0999ba484d7b6469c81cee4a7d38da5386440f96" +checksum = "46059721011b0458b7bd6d9179be5d0b60294281c23320c207adceaecc54d13b" dependencies = [ "hashbrown 0.14.5", "itertools 0.11.0", @@ -4054,9 +3966,9 @@ dependencies = [ [[package]] name = "malachite-nz" -version = "0.4.15" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa263ca62420c1f65cf6758f55c979a49ad83169f332e602b1890f1e1277a429" +checksum = "1503b27e825cabd1c3d0ff1e95a39fb2ec9eab6fd3da6cfa41aec7091d273e78" dependencies = [ "itertools 0.11.0", "libm", @@ -4080,11 +3992,15 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "matrixmultiply" -version = "0.2.4" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1" +checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a" dependencies = [ + "autocfg", + "num_cpus", + "once_cell", "rawpointer", + "thread-tree", ] [[package]] @@ -4143,15 +4059,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -4175,7 +4082,7 @@ checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] @@ -4187,7 +4094,7 @@ checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.52.0", ] @@ -4197,44 +4104,13 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" -[[package]] -name = "nalgebra" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abb021006c01b126a936a8dd1351e0720d83995f4fc942d0d426c654f990745" -dependencies = [ - "alga", - "approx", 
- "generic-array 0.13.3", - "matrixmultiply", - "num-complex 0.2.4", - "num-rational 0.2.4", - "num-traits", - "rand 0.7.3", - "rand_distr 0.2.2", - "typenum", -] - [[package]] name = "nanoid" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand 0.8.5", -] - -[[package]] -name = "ndarray" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac06db03ec2f46ee0ecdca1a1c34a99c0d188a0d83439b84bf0cb4b386e4ab09" -dependencies = [ - "matrixmultiply", - "num-complex 0.2.4", - "num-integer", - "num-traits", - "rawpointer", + "rand", ] [[package]] @@ -4326,10 +4202,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-bigint", - "num-complex 0.4.6", + "num-complex", "num-integer", "num-iter", - "num-rational 0.4.2", + "num-rational", "num-traits", ] @@ -4343,16 +4219,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg", - "num-traits", -] - [[package]] name = "num-complex" version = "0.4.6" @@ -4388,17 +4254,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-rational" version = "0.4.2" @@ -4460,18 +4315,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.3" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -4491,6 +4346,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "order-stat" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efa535d5117d3661134dbf1719b6f0ffe06f2375843b13935db186cd094105eb" + [[package]] name = "ordered-float" version = "2.10.1" @@ -4521,9 +4382,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -4565,7 +4426,7 @@ checksum = "70df726c43c645ef1dde24c7ae14692036ebe5457c92c5f0ec4cfceb99634ff6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -4575,7 +4436,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -4607,6 +4468,30 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "peroxide" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703b5fbdc1f9018a66e2db8758633cec31d39ad3127bfd38c9b6ad510637519c" +dependencies = [ + "matrixmultiply", + "order-stat", + "peroxide-ad", + "puruspe", + "rand", + "rand_distr", +] + +[[package]] +name = "peroxide-ad" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6fba8ff3f40b67996f7c745f699babaa3e57ef5c8178ec999daf7eedc51dc8c" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -4614,27 +4499,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.4.0", + "indexmap 2.6.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -4656,31 +4541,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand", "futures-io", ] [[package]] name = "pkg-config" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" - -[[package]] -name = "polling" -version = "2.8.0" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if 1.0.0", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "polling" @@ -4692,7 +4561,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.34", + "rustix", "tracing", "windows-sys 0.59.0", ] @@ -4710,9 +4579,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -4731,21 +4600,21 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] @@ -4782,9 +4651,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", "prost-derive", @@ -4792,9 +4661,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", "heck", @@ -4807,32 +4676,38 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.75", + "syn 2.0.79", "tempfile", ] [[package]] name = "prost-derive" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "prost-types" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" dependencies = [ "prost", ] +[[package]] +name = "puruspe" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3804877ffeba468c806c2ad9057bbbae92e4b2c410c2f108baaa0042f241fa4c" + [[package]] name = "quinn" version = "0.11.5" @@ -4845,7 +4720,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.0.0", "rustls", - "socket2 0.5.7", + "socket2", "thiserror", "tokio", "tracing", @@ -4858,7 +4733,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", - "rand 0.8.5", + "rand", "ring", "rustc-hash 2.0.0", "rustls", @@ -4876,33 +4751,20 @@ checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" 
-dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -4910,18 +4772,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -4931,16 +4783,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -4949,16 +4792,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", -] - -[[package]] -name = "rand_distr" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" -dependencies = [ - "rand 0.7.3", + "getrandom", ] [[package]] @@ -4968,16 +4802,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "rand", ] [[package]] @@ -5008,9 +4833,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -5021,16 +4846,16 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.15", + "getrandom", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", @@ -5040,9 +4865,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -5051,15 +4876,15 @@ dependencies = [ [[package]] 
name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64", "bytes", @@ -5108,7 +4933,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if 1.0.0", - "getrandom 0.2.15", + "getrandom", "libc", "spin", "untrusted", @@ -5126,9 +4951,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" +checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" dependencies = [ "libc", ] @@ -5148,7 +4973,7 @@ name = "rothschild" version = "0.15.2" dependencies = [ "async-channel 2.3.1", - "clap 4.5.16", + "clap 4.5.19", "criterion", "faster-hex", "itertools 0.13.0", @@ -5187,45 +5012,31 @@ checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.37.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.14", + "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "log", "once_cell", @@ -5238,25 +5049,24 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] 
name = "rustls-webpki" -version = "0.102.6" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -5269,6 +5079,22 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +[[package]] +name = "rv" +version = "0.16.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c07e0a3b756794c7ea2f05d93760ffb946ff4f94b255d92444d94c19fd71f4ab" +dependencies = [ + "doc-comment", + "lru", + "num", + "num-traits", + "peroxide", + "rand", + "rand_distr", + "special", +] + [[package]] name = "ryu" version = "1.0.18" @@ -5307,20 +5133,20 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "secp256k1" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "rand 0.8.5", + "rand", "secp256k1-sys", "serde", ] [[package]] name = "secp256k1-sys" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" dependencies = [ "cc", ] @@ -5351,9 +5177,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5381,20 +5207,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -5410,14 +5236,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -5436,15 +5262,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = 
"8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.4.0", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -5454,14 +5280,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -5470,7 +5296,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5561,11 +5387,11 @@ version = "0.15.2" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", - "clap 4.5.16", + "clap 4.5.19", "dhat", "futures", "futures-util", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-alloc", "kaspa-consensus", @@ -5578,8 +5404,8 @@ dependencies = [ "kaspa-utils", "log", "num_cpus", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "secp256k1", "tokio", @@ -5619,16 +5445,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -5645,6 +5461,15 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd62203d74a728ae353b4d716fc2a80e8da881dfdf8bbc0c012d877a58c4030" +[[package]] +name = "special" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b89cf0d71ae639fdd8097350bfac415a41aabf1d5ddd356295fdc95f09760382" +dependencies = [ + "libm", +] + [[package]] name = "spin" version = "0.9.8" @@ -5657,27 +5482,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" -[[package]] -name = "statest" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04ed65138bd1680f47e4d980ac7d3cf5e827fa99c2fa6683e640094a494602b4" -dependencies = [ - "ndarray", - "num-traits", - "statrs", -] - -[[package]] -name = "statrs" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e34b58a8f9b7462b6922e0b4e3c83d1b3c2075f7f996a56d6c66afa81590064" -dependencies = [ - "nalgebra", - "rand 0.7.3", -] - [[package]] name = "strsim" version = "0.8.0" @@ -5715,9 +5519,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.75" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -5733,7 +5537,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -5753,9 +5557,9 @@ dependencies = 
[ [[package]] name = "sysinfo" -version = "0.31.2" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4115055da5f572fff541dd0c4e61b0262977f453cc9fe04be83aba25a89bdab" +checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" dependencies = [ "core-foundation-sys", "libc", @@ -5788,14 +5592,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if 1.0.0", - "fastrand 2.1.0", + "fastrand", "once_cell", - "rustix 0.38.34", + "rustix", "windows-sys 0.59.0", ] @@ -5830,22 +5634,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -5864,6 +5668,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "thread-tree" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbd370cb847953a25954d9f63e14824a36113f8c72eecf6eccef5dc4b45d630" +dependencies = [ + "crossbeam-channel", +] + [[package]] name = "time" version = "0.3.36" @@ -5924,9 +5737,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -5934,7 +5747,7 @@ dependencies = [ "mio 1.0.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -5947,7 +5760,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -5990,9 +5803,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -6010,7 +5823,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit", ] [[package]] @@ -6024,26 +5837,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" -dependencies = [ - "indexmap 2.4.0", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = 
"toml_edit" -version = "0.22.20" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", + "winnow", ] [[package]] @@ -6069,7 +5871,7 @@ dependencies = [ "pin-project", "prost", "rustls-pemfile", - "socket2 0.5.7", + "socket2", "tokio", "tokio-rustls", "tokio-stream", @@ -6091,7 +5893,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6105,7 +5907,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", + "rand", "slab", "tokio", "tokio-util", @@ -6120,6 +5922,10 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", ] @@ -6171,7 +5977,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6207,7 +6013,7 @@ dependencies = [ "http 1.1.0", "httparse", "log", - "rand 0.8.5", + "rand", "rustls", "rustls-pki-types", "sha1", @@ -6232,15 +6038,15 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-linebreak" @@ -6250,24 +6056,24 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "universal-hash" @@ -6329,7 +6135,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.15", + "getrandom", ] [[package]] 
@@ -6338,8 +6144,8 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom", + "rand", "serde", "wasm-bindgen", ] @@ -6383,12 +6189,6 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" -[[package]] -name = "waker-fn" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" - [[package]] name = "walkdir" version = "2.5.0" @@ -6408,12 +6208,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6444,7 +6238,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", "wasm-bindgen-shared", ] @@ -6478,7 +6272,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6512,7 +6306,7 @@ checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6527,9 +6321,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -6543,7 +6337,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.34", + "rustix", ] [[package]] @@ -6616,7 +6410,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6627,7 +6421,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6819,18 +6613,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -6865,10 +6650,10 @@ dependencies = [ "dirs", "faster-hex", "futures", - "getrandom 0.2.15", + "getrandom", "instant", "js-sys", - "rand 0.8.5", + "rand", "rlimit", "serde", "serde-wasm-bindgen", @@ -6996,7 +6781,7 @@ dependencies = [ "futures", "js-sys", "nw-sys", - "rand 0.8.5", + "rand", "serde", "serde-wasm-bindgen", "thiserror", @@ -7046,9 +6831,9 @@ dependencies = [ "downcast-rs", "futures", "futures-util", - "getrandom 
0.2.15", + "getrandom", "manual_future", - "rand 0.8.5", + "rand", "serde", "serde_json", "thiserror", @@ -7254,9 +7039,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmltree" @@ -7291,7 +7076,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b99276945b..dd5eb31320 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -224,6 +224,7 @@ regex = "1.10.2" ripemd = { version = "0.1.3", default-features = false } rlimit = "0.10.1" rocksdb = "0.22.0" +rv = "0.16.4" secp256k1 = { version = "0.29.0", features = [ "global-context", "rand-std", @@ -243,8 +244,6 @@ sha3 = "0.10.8" slugify-rs = "0.0.3" smallvec = { version = "1.11.1", features = ["serde"] } sorted-insert = "0.2.3" -statest = "0.2.2" -statrs = "0.13.0" # TODO "0.16.0" subtle = { version = "2.5.0", default-features = false } sysinfo = "0.31.2" tempfile = "3.8.1" diff --git a/README.md b/README.md index 8749c49e90..d9066efa84 100644 --- a/README.md +++ b/README.md @@ -184,7 +184,8 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of Kaspa CLI + Wallet -`kaspa-cli` crate provides cli-driven RPC interface to the node and a + +`kaspa-cli` crate provides a cli-driven RPC interface to the node and a terminal interface to the Rusty Kaspa Wallet runtime. These wallets are compatible with WASM SDK Wallet API and Kaspa NG projects. @@ -323,8 +324,6 @@ wRPC Simulation framework (Simpa) -Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.10.0/env_logger/#filtering-results) by either: - The current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). To see the available commands diff --git a/components/addressmanager/Cargo.toml b/components/addressmanager/Cargo.toml index e4398dc4e5..ef735b19c0 100644 --- a/components/addressmanager/Cargo.toml +++ b/components/addressmanager/Cargo.toml @@ -27,5 +27,4 @@ thiserror.workspace = true tokio.workspace = true [dev-dependencies] -statrs.workspace = true -statest.workspace = true +rv.workspace = true diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index 85f9acb3e2..093323e155 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -520,8 +520,7 @@ mod address_store_with_cache { use kaspa_database::create_temp_db; use kaspa_database::prelude::ConnBuilder; use kaspa_utils::networking::IpAddress; - use statest::ks::KSTest; - use statrs::distribution::Uniform; + use rv::{dist::Uniform, misc::ks_test as one_way_ks_test, traits::Cdf}; use std::net::{IpAddr, Ipv6Addr}; #[test] @@ -591,10 +590,11 @@ mod address_store_with_cache { assert!(num_of_buckets >= 12); // Run multiple Kolmogorov–Smirnov tests to offset random noise of the random weighted iterator - let num_of_trials = 512; + let num_of_trials = 2048; // Number of trials to run the test, chosen to reduce random noise. 
            let mut cul_p = 0.;
             // The target uniform distribution
-            let target_u_dist = Uniform::new(0.0, (num_of_buckets) as f64).unwrap();
+            let target_uniform_dist = Uniform::new(1.0, num_of_buckets as f64).unwrap();
+            let uniform_cdf = |x: f64| target_uniform_dist.cdf(&x);
             for _ in 0..num_of_trials {
                 // The weight-sampled distribution, which is expected to be uniform
                 let prioritized_address_distribution = am
                     .lock()
                     .iterate_prioritized_random_addresses(HashSet::new())
                     .take(num_of_buckets)
                     .map(|addr| addr.prefix_bucket().as_u64() as f64)
                     .collect_vec();
-
-                let ks_test = KSTest::new(prioritized_address_distribution.as_slice());
-                cul_p += ks_test.ks1(&target_u_dist).0;
+                cul_p += one_way_ks_test(prioritized_address_distribution.as_slice(), uniform_cdf).1;
             }
             // Normalize and adjust p to test for uniformity, over the average of all trials.
-            let adjusted_p = (0.5 - cul_p / num_of_trials as f64).abs();
+            // We do this to reduce the chance that random noise alone fails the test.
+            let adjusted_p = ((cul_p / num_of_trials as f64) - 0.5).abs();

             // Define the significance threshold.
             let significance = 0.10;
@@ -619,7 +618,7 @@
             adjusted_p, significance
         );
-        assert!(adjusted_p <= significance)
+        assert!(adjusted_p <= significance);
     }
 }
}
From 1274e9c1de6778aa4038e354412410a26f7bc6a4 Mon Sep 17 00:00:00 2001
From: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
Date: Tue, 8 Oct 2024 15:00:20 +0400
Subject: [PATCH 14/31] enhance tx inputs processing (#495)

* sighash reused trait
* benches are implemented
* use cache per iteration per function
* fix par versions
* fix benches
* use upgradable read
* use concurrent cache
* use hashcache
* don't apply cache
* rollback rwlock and indexmap.
* remove scc
* apply par iter to `check_scripts`
* refactor check_scripts fn, fix tests
* fix clippy
* add bench with custom threadpool
* style: fmt
* suppress warnings
* Merge branch 'master' into bcm-parallel-processing
* renames + map err
* reuse code
* bench: avoid exposing cache map + iter pools in powers of 2
* simplify check_sig_op_counts
* use thread pool also if a single input
  1. to avoid confusion
  2.
since tokio blocking threads are not meant to be used for processing anyway * remove todo * clear cache instead of recreate * use and_then (so map_err can be called in a single location) * extend check scripts tests for better coverage of the par_iter case --------- Co-authored-by: Michael Sutton --- Cargo.lock | 2 + consensus/Cargo.toml | 5 + consensus/benches/check_scripts.rs | 126 ++++++++++++ consensus/client/src/sign.rs | 6 +- consensus/client/src/signing.rs | 10 +- consensus/core/Cargo.toml | 1 + consensus/core/src/hashing/sighash.rs | 193 +++++++++++++----- consensus/core/src/sign.rs | 22 +- .../pipeline/virtual_processor/processor.rs | 5 +- .../transaction_validator_populated.rs | 140 +++++++++++-- consensus/wasm/src/utils.rs | 6 +- crypto/txscript/src/caches.rs | 4 + crypto/txscript/src/lib.rs | 59 +++--- crypto/txscript/src/opcodes/macros.rs | 22 +- crypto/txscript/src/opcodes/mod.rs | 70 ++++--- crypto/txscript/src/standard/multisig.rs | 10 +- .../src/mempool/check_transaction_standard.rs | 9 +- wallet/core/src/account/pskb.rs | 8 +- wallet/pskt/examples/multisig.rs | 8 +- wallet/pskt/src/pskt.rs | 7 +- 20 files changed, 538 insertions(+), 175 deletions(-) create mode 100644 consensus/benches/check_scripts.rs diff --git a/Cargo.lock b/Cargo.lock index 3449a43dae..a0db546302 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2414,6 +2414,7 @@ dependencies = [ "futures-util", "indexmap 2.6.0", "itertools 0.13.0", + "kaspa-addresses", "kaspa-consensus-core", "kaspa-consensus-notify", "kaspa-consensusmanager", @@ -2475,6 +2476,7 @@ dependencies = [ name = "kaspa-consensus-core" version = "0.15.2" dependencies = [ + "arc-swap", "async-trait", "bincode", "borsh", diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 3f4a1b456d..b9a183ea8c 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -54,11 +54,16 @@ serde_json.workspace = true flate2.workspace = true rand_distr.workspace = true kaspa-txscript-errors.workspace = true +kaspa-addresses.workspace = true [[bench]] name = "hash_benchmarks" harness = false +[[bench]] +name = "check_scripts" +harness = false + [features] html_reports = [] devnet-prealloc = ["kaspa-consensus-core/devnet-prealloc"] diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs new file mode 100644 index 0000000000..d65ac63626 --- /dev/null +++ b/consensus/benches/check_scripts.rs @@ -0,0 +1,126 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode}; +use kaspa_addresses::{Address, Prefix, Version}; +use kaspa_consensus::processes::transaction_validator::transaction_validator_populated::{ + check_scripts_par_iter, check_scripts_par_iter_pool, check_scripts_sequential, +}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; +use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL; +use kaspa_consensus_core::subnets::SubnetworkId; +use kaspa_consensus_core::tx::{MutableTransaction, Transaction, TransactionInput, TransactionOutpoint, UtxoEntry}; +use kaspa_txscript::caches::Cache; +use kaspa_txscript::pay_to_address_script; +use rand::{thread_rng, Rng}; +use secp256k1::Keypair; +use std::thread::available_parallelism; + +// You may need to add more detailed mocks depending on your actual code. 
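+// Builds a mock transaction with `inputs_count` schnorr-signed inputs, where the
+// last `non_uniq_signatures` inputs reuse the final keypair (and therefore the
+// same pay-to-address script) instead of a fresh one.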
+fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec<UtxoEntry>) {
+    let reused_values = SigHashReusedValuesUnsync::new();
+    let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1);
+    let mut tx = Transaction::new(
+        0,
+        vec![],
+        vec![],
+        0,
+        SubnetworkId::from_bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
+        0,
+        vec![],
+    );
+    let mut utxos = vec![];
+    let mut kps = vec![];
+    for _ in 0..inputs_count - non_uniq_signatures {
+        let kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng());
+        tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 });
+        let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize());
+        utxos.push(UtxoEntry {
+            amount: thread_rng().gen::<u32>() as u64,
+            script_public_key: pay_to_address_script(&address),
+            block_daa_score: 333,
+            is_coinbase: false,
+        });
+        kps.push(kp);
+    }
+    for _ in 0..non_uniq_signatures {
+        let kp = kps.last().unwrap();
+        tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 });
+        let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize());
+        utxos.push(UtxoEntry {
+            amount: thread_rng().gen::<u32>() as u64,
+            script_public_key: pay_to_address_script(&address),
+            block_daa_score: 444,
+            is_coinbase: false,
+        });
+    }
+    for (i, kp) in kps.iter().enumerate().take(inputs_count - non_uniq_signatures) {
+        let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone());
+        let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values);
+        let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap();
+        let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref();
+        // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte)
+        tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect();
+    }
+    let length = tx.inputs.len();
+    for i in (inputs_count - non_uniq_signatures)..length {
+        let kp = kps.last().unwrap();
+        let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone());
+        let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values);
+        let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap();
+        let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref();
+        // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte)
+        tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect();
+    }
+    (tx, utxos)
+}
+
+fn benchmark_check_scripts(c: &mut Criterion) {
+    for inputs_count in [100, 50, 25, 10, 5, 2] {
+        for non_uniq_signatures in [0, inputs_count / 2] {
+            let (tx, utxos) = mock_tx(inputs_count, non_uniq_signatures);
+            let mut group = c.benchmark_group(format!("inputs: {inputs_count}, non uniq: {non_uniq_signatures}"));
+            group.sampling_mode(SamplingMode::Flat);
+
+            group.bench_function("single_thread", |b| {
+                let tx = MutableTransaction::with_entries(&tx, utxos.clone());
+                let cache = Cache::new(inputs_count as u64);
+                b.iter(|| {
+                    cache.clear();
+                    check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable())).unwrap();
+                })
+            });
+
+            group.bench_function("rayon par iter", |b| {
+                let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone());
+                let cache =
Cache::new(inputs_count as u64); + b.iter(|| { + cache.clear(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); + }) + }); + + // Iterate powers of two up to available parallelism + for i in (1..=(available_parallelism().unwrap().get() as f64).log2().ceil() as u32).map(|x| 2u32.pow(x) as usize) { + if inputs_count >= i { + group.bench_function(format!("rayon, custom thread pool, thread count {i}"), |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + // Create a custom thread pool with the specified number of threads + let pool = rayon::ThreadPoolBuilder::new().num_threads(i).build().unwrap(); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.clear(); + check_scripts_par_iter_pool(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool)).unwrap(); + }) + }); + } + } + } + } +} + +criterion_group! { + name = benches; + // This can be any expression that returns a `Criterion` object. + config = Criterion::default().with_output_color(true).measurement_time(std::time::Duration::new(20, 0)); + targets = benchmark_check_scripts +} + +criterion_main!(benches); diff --git a/consensus/client/src/sign.rs b/consensus/client/src/sign.rs index 4044dc5701..18ff3c8491 100644 --- a/consensus/client/src/sign.rs +++ b/consensus/client/src/sign.rs @@ -7,7 +7,7 @@ use core::iter::once; use itertools::Itertools; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, tx::PopulatedTransaction, @@ -44,7 +44,7 @@ pub fn sign_with_multiple_v3<'a>(tx: &'a Transaction, privkeys: &[[u8; 32]]) -> map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; { let input_len = tx.inner().inputs.len(); @@ -59,7 +59,7 @@ pub fn sign_with_multiple_v3<'a>(tx: &'a Transaction, privkeys: &[[u8; 32]]) -> }; let script = script_pub_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) diff --git a/consensus/client/src/signing.rs b/consensus/client/src/signing.rs index ef993d0118..f7fe8cee6a 100644 --- a/consensus/client/src/signing.rs +++ b/consensus/client/src/signing.rs @@ -75,7 +75,7 @@ impl SigHashCache { } } - pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { + pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } @@ -185,16 +185,16 @@ pub fn calc_schnorr_signature_hash( let mut hasher = TransactionSigningHash::new(); hasher .write_u16(tx.version) - .update(previous_outputs_hash(&tx, hash_type, &mut reused_values)) - .update(sequences_hash(&tx, hash_type, &mut reused_values)) - .update(sig_op_counts_hash(&tx, hash_type, &mut reused_values)); + .update(previous_outputs_hash(&tx, 
hash_type, &reused_values)) + .update(sequences_hash(&tx, hash_type, &reused_values)) + .update(sig_op_counts_hash(&tx, hash_type, &reused_values)); hash_outpoint(&mut hasher, input.previous_outpoint); hash_script_public_key(&mut hasher, &utxo.script_public_key); hasher .write_u64(utxo.amount) .write_u64(input.sequence) .write_u8(input.sig_op_count) - .update(outputs_hash(&tx, hash_type, &mut reused_values, input_index)) + .update(outputs_hash(&tx, hash_type, &reused_values, input_index)) .write_u64(tx.lock_time) .update(&tx.subnetwork_id) .write_u64(tx.gas) diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 44dbedd387..228b4ac11d 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -15,6 +15,7 @@ wasm32-sdk = [] default = [] [dependencies] +arc-swap.workspace = true async-trait.workspace = true borsh.workspace = true cfg-if.workspace = true diff --git a/consensus/core/src/hashing/sighash.rs b/consensus/core/src/hashing/sighash.rs index c1b6133e8a..e6c7ad4dd0 100644 --- a/consensus/core/src/hashing/sighash.rs +++ b/consensus/core/src/hashing/sighash.rs @@ -1,4 +1,7 @@ +use arc_swap::ArcSwapOption; use kaspa_hashes::{Hash, Hasher, HasherBase, TransactionSigningHash, TransactionSigningHashECDSA, ZERO_HASH}; +use std::cell::Cell; +use std::sync::Arc; use crate::{ subnets::SUBNETWORK_ID_NATIVE, @@ -11,72 +14,172 @@ use super::{sighash_type::SigHashType, HasherExtensions}; /// the same for all transaction inputs. /// Reuse of such values prevents the quadratic hashing problem. #[derive(Default)] -pub struct SigHashReusedValues { - previous_outputs_hash: Option, - sequences_hash: Option, - sig_op_counts_hash: Option, - outputs_hash: Option, +pub struct SigHashReusedValuesUnsync { + previous_outputs_hash: Cell>, + sequences_hash: Cell>, + sig_op_counts_hash: Cell>, + outputs_hash: Cell>, } -impl SigHashReusedValues { +impl SigHashReusedValuesUnsync { pub fn new() -> Self { - Self { previous_outputs_hash: None, sequences_hash: None, sig_op_counts_hash: None, outputs_hash: None } + Self::default() } } -pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +#[derive(Default)] +pub struct SigHashReusedValuesSync { + previous_outputs_hash: ArcSwapOption, + sequences_hash: ArcSwapOption, + sig_op_counts_hash: ArcSwapOption, + outputs_hash: ArcSwapOption, +} + +impl SigHashReusedValuesSync { + pub fn new() -> Self { + Self::default() + } +} + +pub trait SigHashReusedValues { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; +} + +impl SigHashReusedValues for Arc { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().previous_outputs_hash(set) + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().sequences_hash(set) + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().sig_op_counts_hash(set) + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().outputs_hash(set) + } +} + +impl SigHashReusedValues for SigHashReusedValuesUnsync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.previous_outputs_hash.get().unwrap_or_else(|| { + let hash = set(); + self.previous_outputs_hash.set(Some(hash)); + hash + }) + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) 
-> Hash { + self.sequences_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sequences_hash.set(Some(hash)); + hash + }) + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.sig_op_counts_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sig_op_counts_hash.set(Some(hash)); + hash + }) + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.outputs_hash.get().unwrap_or_else(|| { + let hash = set(); + self.outputs_hash.set(Some(hash)); + hash + }) + } +} + +impl SigHashReusedValues for SigHashReusedValuesSync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.previous_outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.previous_outputs_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sequences_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sequences_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sig_op_counts_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sig_op_counts_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.outputs_hash.rcu(|_| Arc::new(hash)); + hash + } +} + +pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - - if let Some(previous_outputs_hash) = reused_values.previous_outputs_hash { - previous_outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.update(input.previous_outpoint.transaction_id.as_bytes()); hasher.write_u32(input.previous_outpoint.index); } - let previous_outputs_hash = hasher.finalize(); - reused_values.previous_outputs_hash = Some(previous_outputs_hash); - previous_outputs_hash - } + hasher.finalize() + }; + reused_values.previous_outputs_hash(hash) } -pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_single() || hash_type.is_sighash_anyone_can_pay() || hash_type.is_sighash_none() { return ZERO_HASH; } - - if let Some(sequences_hash) = reused_values.sequences_hash { - sequences_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u64(input.sequence); } - let sequence_hash = hasher.finalize(); - reused_values.sequences_hash = Some(sequence_hash); - sequence_hash - } + hasher.finalize() + }; + reused_values.sequences_hash(hash) } -pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - if let Some(sig_op_counts_hash) = reused_values.sig_op_counts_hash { - sig_op_counts_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u8(input.sig_op_count); } - let 
sig_op_counts_hash = hasher.finalize(); - reused_values.sig_op_counts_hash = Some(sig_op_counts_hash); - sig_op_counts_hash - } + hasher.finalize() + }; + reused_values.sig_op_counts_hash(hash) } pub fn payload_hash(tx: &Transaction) -> Hash { @@ -92,7 +195,7 @@ pub fn payload_hash(tx: &Transaction) -> Hash { hasher.finalize() } -pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues, input_index: usize) -> Hash { +pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues, input_index: usize) -> Hash { if hash_type.is_sighash_none() { return ZERO_HASH; } @@ -107,19 +210,15 @@ pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mu hash_output(&mut hasher, &tx.outputs[input_index]); return hasher.finalize(); } - - // Otherwise, return hash of all outputs. Re-use hash if available. - if let Some(outputs_hash) = reused_values.outputs_hash { - outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for output in tx.outputs.iter() { hash_output(&mut hasher, output); } - let outputs_hash = hasher.finalize(); - reused_values.outputs_hash = Some(outputs_hash); - outputs_hash - } + hasher.finalize() + }; + // Otherwise, return hash of all outputs. Re-use hash if available. + reused_values.outputs_hash(hash) } pub fn hash_outpoint(hasher: &mut impl Hasher, outpoint: TransactionOutpoint) { @@ -141,7 +240,7 @@ pub fn calc_schnorr_signature_hash( verifiable_tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let input = verifiable_tx.populated_input(input_index); let tx = verifiable_tx.tx(); @@ -170,7 +269,7 @@ pub fn calc_ecdsa_signature_hash( tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, reused_values); let mut hasher = TransactionSigningHashECDSA::new(); @@ -573,9 +672,9 @@ mod tests { } } let populated_tx = PopulatedTransaction::new(&tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); assert_eq!( - calc_schnorr_signature_hash(&populated_tx, test.input_index, test.hash_type, &mut reused_values).to_string(), + calc_schnorr_signature_hash(&populated_tx, test.input_index, test.hash_type, &reused_values).to_string(), test.expected_hash, "test {} failed", test.name diff --git a/consensus/core/src/sign.rs b/consensus/core/src/sign.rs index a40b949e35..1a87d03f17 100644 --- a/consensus/core/src/sign.rs +++ b/consensus/core/src/sign.rs @@ -1,6 +1,6 @@ use crate::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::{SigHashType, SIG_HASH_ALL}, }, tx::{SignableTransaction, VerifiableTransaction}, @@ -84,9 +84,9 @@ pub fn sign(mut signable_tx: SignableTransaction, schnorr_key: secp256k1::Keypai signable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..signable_tx.tx.inputs.len() { - let sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), 
i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -106,11 +106,11 @@ pub fn sign_with_multiple(mut mutable_tx: SignableTransaction, privkeys: Vec<[u8 mutable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -132,12 +132,12 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -155,9 +155,9 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u /// Sign a transaction input with a sighash_type using schnorr pub fn sign_input(tx: &impl VerifiableTransaction, input_index: usize, private_key: &[u8; 32], hash_type: SigHashType) -> Vec { - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); - let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, &mut reused_values); + let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, private_key).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); @@ -167,7 +167,7 @@ pub fn sign_input(tx: &impl VerifiableTransaction, input_index: usize, private_k } pub fn verify(tx: &impl VerifiableTransaction) -> Result<(), Error> { - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { if input.signature_script.is_empty() { return Err(Error::Message(format!("Signature is empty for input: {i}"))); @@ -175,7 +175,7 @@ pub fn verify(tx: &impl VerifiableTransaction) -> Result<(), Error> { let pk = &entry.script_public_key.script()[1..33]; let pk = 
secp256k1::XOnlyPublicKey::from_slice(pk)?; let sig = secp256k1::schnorr::Signature::from_slice(&input.signature_script[1..65])?; - let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice())?; sig.verify(&msg, &pk)?; } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 88fee97bff..9af6879c7b 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -778,7 +778,10 @@ impl VirtualStateProcessor { let virtual_utxo_view = &virtual_read.utxo_set; let virtual_daa_score = virtual_state.daa_score; let virtual_past_median_time = virtual_state.past_median_time; - self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time, args) + // Run within the thread pool since par_iter might be internally applied to inputs + self.thread_pool.install(|| { + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time, args) + }) } pub fn validate_mempool_transactions_in_parallel( diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index 4a8733d2be..dbf1aa37ea 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -1,18 +1,24 @@ use crate::constants::{MAX_SOMPI, SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK}; use kaspa_consensus_core::{ - hashing::sighash::SigHashReusedValues, + hashing::sighash::{SigHashReusedValuesSync, SigHashReusedValuesUnsync}, mass::Kip9Version, tx::{TransactionInput, VerifiableTransaction}, }; use kaspa_core::warn; -use kaspa_txscript::{get_sig_op_count, TxScriptEngine}; +use kaspa_txscript::{caches::Cache, get_sig_op_count, SigCacheKey, TxScriptEngine}; use kaspa_txscript_errors::TxScriptError; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; +use rayon::ThreadPool; +use std::marker::Sync; use super::{ errors::{TxResult, TxRuleError}, TransactionValidator, }; +/// The threshold above which we apply parallelism to input script processing +const CHECK_SCRIPTS_PARALLELISM_THRESHOLD: usize = 1; + #[derive(Clone, Copy, PartialEq, Eq)] pub enum TxValidationFlags { /// Perform full validation including script verification @@ -29,7 +35,7 @@ pub enum TxValidationFlags { impl TransactionValidator { pub fn validate_populated_transaction_and_get_fee( &self, - tx: &impl VerifiableTransaction, + tx: &(impl VerifiableTransaction + Sync), pov_daa_score: u64, flags: TxValidationFlags, mass_and_feerate_threshold: Option<(u64, f64)>, @@ -48,8 +54,8 @@ impl TransactionValidator { } Self::check_sequence_lock(tx, pov_daa_score)?; - // The following call is not a consensus check (it could not be one in the first place since it uses floating number) - // but rather a mempool Replace by Fee validation rule. It was placed here purposely for avoiding unneeded script checks. + // The following call is not a consensus check (it could not be one in the first place since it uses a floating number) + // but rather a mempool Replace by Fee validation rule. 
It is placed here purposely for avoiding unneeded script checks. Self::check_feerate_threshold(fee, mass_and_feerate_threshold)?; match flags { @@ -158,7 +164,7 @@ impl TransactionValidator { fn check_sig_op_counts(tx: &T) -> TxResult<()> { for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let calculated = get_sig_op_count::(&input.signature_script, &entry.script_public_key); + let calculated = get_sig_op_count::(&input.signature_script, &entry.script_public_key); if calculated != input.sig_op_count as u64 { return Err(TxRuleError::WrongSigOpCount(i, input.sig_op_count as u64, calculated)); } @@ -166,16 +172,45 @@ impl TransactionValidator { Ok(()) } - pub fn check_scripts(&self, tx: &impl VerifiableTransaction) -> TxResult<()> { - let mut reused_values = SigHashReusedValues::new(); - for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let mut engine = TxScriptEngine::from_transaction_input(tx, input, i, entry, &mut reused_values, &self.sig_cache) - .map_err(|err| map_script_err(err, input))?; - engine.execute().map_err(|err| map_script_err(err, input))?; - } + pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { + check_scripts(&self.sig_cache, tx) + } +} - Ok(()) +pub fn check_scripts(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { + if tx.inputs().len() > CHECK_SCRIPTS_PARALLELISM_THRESHOLD { + check_scripts_par_iter(sig_cache, tx) + } else { + check_scripts_sequential(sig_cache, tx) + } +} + +pub fn check_scripts_sequential(sig_cache: &Cache, tx: &impl VerifiableTransaction) -> TxResult<()> { + let reused_values = SigHashReusedValuesUnsync::new(); + for (i, (input, entry)) in tx.populated_inputs().enumerate() { + TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache) + .and_then(|mut e| e.execute()) + .map_err(|err| map_script_err(err, input))?; } + Ok(()) +} + +pub fn check_scripts_par_iter(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { + let reused_values = SigHashReusedValuesSync::new(); + (0..tx.inputs().len()).into_par_iter().try_for_each(|idx| { + let (input, utxo) = tx.populated_input(idx); + TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache) + .and_then(|mut e| e.execute()) + .map_err(|err| map_script_err(err, input)) + }) +} + +pub fn check_scripts_par_iter_pool( + sig_cache: &Cache, + tx: &(impl VerifiableTransaction + Sync), + pool: &ThreadPool, +) -> TxResult<()> { + pool.install(|| check_scripts_par_iter(sig_cache, tx)) } fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRuleError { @@ -189,6 +224,7 @@ fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRule #[cfg(test)] mod tests { use super::super::errors::TxRuleError; + use super::CHECK_SCRIPTS_PARALLELISM_THRESHOLD; use core::str::FromStr; use itertools::Itertools; use kaspa_consensus_core::sign::sign; @@ -202,6 +238,15 @@ mod tests { use crate::{params::MAINNET_PARAMS, processes::transaction_validator::TransactionValidator}; + /// Helper function to duplicate the last input + fn duplicate_input(tx: &Transaction, entries: &[UtxoEntry]) -> (Transaction, Vec) { + let mut tx2 = tx.clone(); + let mut entries2 = entries.to_owned(); + tx2.inputs.push(tx2.inputs.last().unwrap().clone()); + entries2.push(entries2.last().unwrap().clone()); + (tx2, entries2) + } + #[test] fn check_signature_test() { let mut params = MAINNET_PARAMS.clone(); @@ -261,6 +306,14 @@ mod tests { ); 
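+        // A single-input tx like this one takes the sequential path, since
+        // inputs.len() == 1 is not above CHECK_SCRIPTS_PARALLELISM_THRESHOLD; the
+        // duplicated-input variant below exercises the parallel path.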
tv.check_scripts(&populated_tx).expect("Signature check failed"); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + // Duplicated sigs should fail due to wrong sighash + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), + Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)) + ); } #[test] @@ -322,7 +375,18 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx).is_err(), "Failing Signature Test Failed"); + assert!(tv.check_scripts(&populated_tx).is_err(), "Expecting signature check to fail"); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)).expect_err("Expecting signature check to fail"); + + // Verify we are correctly testing the parallelism case (applied here as sanity for all tests) + assert!( + tx2.inputs.len() > CHECK_SCRIPTS_PARALLELISM_THRESHOLD, + "The script tests must cover the case of a tx with inputs.len() > {}", + CHECK_SCRIPTS_PARALLELISM_THRESHOLD + ); } #[test] @@ -385,6 +449,14 @@ mod tests { }], ); tv.check_scripts(&populated_tx).expect("Signature check failed"); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + // Duplicated sigs should fail due to wrong sighash + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), + Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) + ); } #[test] @@ -447,7 +519,14 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx) == Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), + Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) + ); } #[test] @@ -510,7 +589,14 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx) == Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), + Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) + ); } #[test] @@ -573,8 +659,14 @@ mod tests { }], ); - let result = tv.check_scripts(&populated_tx); - assert!(result == Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse))); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse))); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), + Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)) + ); } #[test] @@ -628,8 +720,14 @@ mod tests { }], ); - let result = tv.check_scripts(&populated_tx); - 
assert!(result == Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly))); + assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly))); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), + Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly)) + ); } #[test] diff --git a/consensus/wasm/src/utils.rs b/consensus/wasm/src/utils.rs index 0139b573f5..b70664e1e6 100644 --- a/consensus/wasm/src/utils.rs +++ b/consensus/wasm/src/utils.rs @@ -1,5 +1,5 @@ use crate::result::Result; -use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL; use kaspa_consensus_core::tx; @@ -9,9 +9,9 @@ pub fn script_hashes(mut mutable_tx: tx::SignableTransaction) -> Result Option { self.map.read().get(key).cloned().inspect(|_data| { self.counters.get_counts.fetch_add(1, Ordering::Relaxed); diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index b145fb90e5..f177962721 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -45,6 +45,8 @@ pub const MAX_PUB_KEYS_PER_MUTLTISIG: i32 = 20; // Note that this includes OP_RESERVED which counts as a push operation. pub const NO_COST_OPCODE: u8 = 0x60; +type DynOpcodeImplementation = Box>; + #[derive(Clone, Hash, PartialEq, Eq)] enum Signature { Secp256k1(secp256k1::schnorr::Signature), @@ -70,15 +72,14 @@ enum ScriptSource<'a, T: VerifiableTransaction> { StandAloneScripts(Vec<&'a [u8]>), } -pub struct TxScriptEngine<'a, T: VerifiableTransaction> { +pub struct TxScriptEngine<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> { dstack: Stack, astack: Stack, script_source: ScriptSource<'a, T>, // Outer caches for quicker calculation - // TODO:: make it compatible with threading - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, cond_stack: Vec, // Following if stacks, and whether it is running @@ -86,30 +87,35 @@ pub struct TxScriptEngine<'a, T: VerifiableTransaction> { num_ops: i32, } -fn parse_script( +fn parse_script( script: &[u8], -) -> impl Iterator>, TxScriptError>> + '_ { +) -> impl Iterator, TxScriptError>> + '_ { script.iter().batching(|it| deserialize_next_opcode(it)) } -pub fn get_sig_op_count(signature_script: &[u8], prev_script_public_key: &ScriptPublicKey) -> u64 { +pub fn get_sig_op_count( + signature_script: &[u8], + prev_script_public_key: &ScriptPublicKey, +) -> u64 { let is_p2sh = ScriptClass::is_pay_to_script_hash(prev_script_public_key.script()); - let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); + let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); if !is_p2sh { return get_sig_op_count_by_opcodes(&script_pub_key_ops); } - let signature_script_ops = parse_script::(signature_script).collect_vec(); + let signature_script_ops = parse_script::(signature_script).collect_vec(); if signature_script_ops.is_empty() || signature_script_ops.iter().any(|op| op.is_err() || !op.as_ref().unwrap().is_push_opcode()) { return 0; } let p2sh_script = signature_script_ops.last().expect("checked if empty 
above").as_ref().expect("checked if err above").get_data(); - let p2sh_ops = parse_script::(p2sh_script).collect_vec(); + let p2sh_ops = parse_script::(p2sh_script).collect_vec(); get_sig_op_count_by_opcodes(&p2sh_ops) } -fn get_sig_op_count_by_opcodes(opcodes: &[Result>, TxScriptError>]) -> u64 { +fn get_sig_op_count_by_opcodes( + opcodes: &[Result, TxScriptError>], +) -> u64 { // TODO: Check for overflows let mut num_sigs: u64 = 0; for (i, op) in opcodes.iter().enumerate() { @@ -142,12 +148,12 @@ fn get_sig_op_count_by_opcodes(opcodes: &[Result(script: &[u8]) -> bool { - parse_script::(script).enumerate().any(|(index, op)| op.is_err() || (index == 0 && op.unwrap().value() == OpReturn)) +pub fn is_unspendable(script: &[u8]) -> bool { + parse_script::(script).enumerate().any(|(index, op)| op.is_err() || (index == 0 && op.unwrap().value() == OpReturn)) } -impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { - pub fn new(reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { +impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<'a, T, Reused> { + pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { Self { dstack: vec![], astack: vec![], @@ -164,7 +170,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { input: &'a TransactionInput, input_idx: usize, utxo_entry: &'a UtxoEntry, - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, ) -> Result { let script_public_key = utxo_entry.script_public_key.script(); @@ -185,7 +191,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { } } - pub fn from_script(script: &'a [u8], reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { + pub fn from_script(script: &'a [u8], reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { Self { dstack: Default::default(), astack: Default::default(), @@ -202,7 +208,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { return self.cond_stack.is_empty() || *self.cond_stack.last().expect("Checked not empty") == OpCond::True; } - fn execute_opcode(&mut self, opcode: Box>) -> Result<(), TxScriptError> { + fn execute_opcode(&mut self, opcode: DynOpcodeImplementation) -> Result<(), TxScriptError> { // Different from kaspad: Illegal and disabled opcode are checked on execute instead // Note that this includes OP_RESERVED which counts as a push operation. 
if !opcode.is_push_opcode() { @@ -512,6 +518,7 @@ mod tests { use crate::opcodes::codes::{OpBlake2b, OpCheckSig, OpData1, OpData2, OpData32, OpDup, OpEqual, OpPushData1, OpTrue}; use super::*; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -542,7 +549,7 @@ mod tests { fn run_test_script_cases(test_cases: Vec) { let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for test in test_cases { // Ensure encapsulation of variables (no leaking between tests) @@ -565,7 +572,7 @@ mod tests { let populated_tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Script creation failed"); assert_eq!(vm.execute(), test.expected_result); } @@ -783,7 +790,7 @@ mod tests { ]; for test in test_cases { - let check = TxScriptEngine::::check_pub_key_encoding(test.key); + let check = TxScriptEngine::::check_pub_key_encoding(test.key); if test.is_valid { assert_eq!( check, @@ -880,7 +887,10 @@ mod tests { for test in tests { assert_eq!( - get_sig_op_count::(test.signature_script, &test.prev_script_public_key), + get_sig_op_count::( + test.signature_script, + &test.prev_script_public_key + ), test.expected_sig_ops, "failed for '{}'", test.name @@ -909,7 +919,7 @@ mod tests { for test in tests { assert_eq!( - is_unspendable::(test.script_public_key), + is_unspendable::(test.script_public_key), test.expected, "failed for '{}'", test.name @@ -929,6 +939,7 @@ mod bitcoind_tests { use super::*; use crate::script_builder::ScriptBuilderError; use kaspa_consensus_core::constants::MAX_TX_IN_SEQUENCE_NUM; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -1019,13 +1030,13 @@ mod bitcoind_tests { // Run transaction let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut vm = TxScriptEngine::from_transaction_input( &populated_tx, &populated_tx.tx().inputs[0], 0, &populated_tx.entries[0], - &mut reused_values, + &reused_values, &sig_cache, ) .map_err(UnifiedError::TxScriptError)?; diff --git a/crypto/txscript/src/opcodes/macros.rs b/crypto/txscript/src/opcodes/macros.rs index b3db98829a..c4d161d400 100644 --- a/crypto/txscript/src/opcodes/macros.rs +++ b/crypto/txscript/src/opcodes/macros.rs @@ -6,9 +6,9 @@ macro_rules! opcode_serde { [[self.value()].as_slice(), length.to_le_bytes().as_slice(), self.data.as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { match it.take(size_of::<$type>()).copied().collect::>().try_into() { Ok(bytes) => { let length = <$type>::from_le_bytes(bytes) as usize; @@ -32,9 +32,9 @@ macro_rules! 
opcode_serde { [[self.value()].as_slice(), self.data.clone().as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { // Static length includes the opcode itself let data: Vec = it.take($length - 1).copied().collect(); Self::new(data) @@ -44,7 +44,7 @@ macro_rules! opcode_serde { macro_rules! opcode_init { ($type:ty) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() > <$type>::MAX as usize { return Err(TxScriptError::MalformedPush(<$type>::MAX as usize, data.len())); } @@ -52,7 +52,7 @@ macro_rules! opcode_init { } }; ($length: literal) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() != $length - 1 { return Err(TxScriptError::MalformedPush($length - 1, data.len())); } @@ -69,20 +69,20 @@ macro_rules! opcode_impl { opcode_serde!($length); } - impl OpCodeExecution for $name { - fn empty() -> Result>, TxScriptError> { + impl OpCodeExecution for $name { + fn empty() -> Result>, TxScriptError> { Self::new(vec![]) } opcode_init!($length); #[allow(unused_variables)] - fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { + fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { $code } } - impl OpCodeImplementation for $name {} + impl OpCodeImplementation for $name {} } } @@ -111,7 +111,7 @@ macro_rules! opcode_list { )? )* - pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction>(it: &mut I) -> Option>, TxScriptError>> { + pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>(it: &mut I) -> Option>, TxScriptError>> { match it.next() { Some(opcode_num) => match opcode_num { $( diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index ad800d2488..f2a92fa0b5 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -8,6 +8,7 @@ use crate::{ }; use blake2b_simd::Params; use core::cmp::{max, min}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; use kaspa_consensus_core::hashing::sighash_type::SigHashType; use kaspa_consensus_core::tx::VerifiableTransaction; use sha2::{Digest, Sha256}; @@ -73,28 +74,31 @@ pub trait OpCodeMetadata: Debug { } } -pub trait OpCodeExecution { - fn empty() -> Result>, TxScriptError> +pub trait OpCodeExecution { + fn empty() -> Result>, TxScriptError> where Self: Sized; #[allow(clippy::new_ret_no_self)] - fn new(data: Vec) -> Result>, TxScriptError> + fn new(data: Vec) -> Result>, TxScriptError> where Self: Sized; - fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; + fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; } pub trait OpcodeSerialization { fn serialize(&self) -> Vec; - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> + ) -> Result>, TxScriptError> where Self: Sized; } -pub trait OpCodeImplementation: OpCodeExecution + OpCodeMetadata + OpcodeSerialization {} +pub trait OpCodeImplementation: + OpCodeExecution + OpCodeMetadata + OpcodeSerialization +{ +} impl OpCodeMetadata for OpCode { fn value(&self) -> u8 { @@ -193,13 +197,19 @@ impl OpCodeMetadata for OpCode { // Helpers for some opcodes with shared data #[inline] 
-fn push_data(data: Vec, vm: &mut TxScriptEngine) -> OpCodeResult { +fn push_data( + data: Vec, + vm: &mut TxScriptEngine, +) -> OpCodeResult { vm.dstack.push(data); Ok(()) } #[inline] -fn push_number(number: i64, vm: &mut TxScriptEngine) -> OpCodeResult { +fn push_number( + number: i64, + vm: &mut TxScriptEngine, +) -> OpCodeResult { vm.dstack.push_item(number); Ok(()) } @@ -958,7 +968,7 @@ opcode_list! { // converts an opcode from the list of Op0 to Op16 to its associated value #[allow(clippy::borrowed_box)] -pub fn to_small_int(opcode: &Box>) -> u8 { +pub fn to_small_int(opcode: &Box>) -> u8 { let value = opcode.value(); if value == codes::OpFalse { return 0; @@ -976,7 +986,7 @@ mod test { use crate::{opcodes, pay_to_address_script, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD}; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::constants::{SOMPI_PER_KASPA, TX_VERSION}; - use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry, @@ -985,21 +995,21 @@ mod test { struct TestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, dstack: Stack, } struct ErrorTestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, error: TxScriptError, } fn run_success_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for TestCase { init, code, dstack } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache); vm.dstack = init; code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); assert_eq!(*vm.dstack, dstack, "OpCode {} Pushed wrong value", code.value()); @@ -1008,9 +1018,9 @@ mod test { fn run_error_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for ErrorTestCase { init, code, error } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache); vm.dstack.clone_from(&init); assert_eq!( code.execute(&mut vm) @@ -1025,7 +1035,7 @@ mod test { #[test] fn test_opcode_disabled() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpCat::empty().expect("Should accept empty"), opcodes::OpSubStr::empty().expect("Should accept empty"), opcodes::OpLeft::empty().expect("Should accept empty"), @@ -1044,8 +1054,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1057,7 +1067,7 @@ mod test { #[test] fn test_opcode_reserved() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpReserved::empty().expect("Should accept empty"), opcodes::OpVer::empty().expect("Should accept empty"), opcodes::OpVerIf::empty().expect("Should accept empty"), @@ -1067,8 +1077,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = 
SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1080,7 +1090,7 @@ mod test { #[test] fn test_opcode_invalid() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpUnknown166::empty().expect("Should accept empty"), opcodes::OpUnknown167::empty().expect("Should accept empty"), opcodes::OpUnknown178::empty().expect("Should accept empty"), @@ -1158,8 +1168,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -2739,7 +2749,7 @@ mod test { let (base_tx, input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckLockTimeVerify::empty().expect("Should accept empty"); @@ -2751,7 +2761,7 @@ mod test { ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Shouldn't fail"); vm.dstack = vec![lock_time.clone()]; match code.execute(&mut vm) { @@ -2781,7 +2791,7 @@ mod test { let (tx, base_input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckSequenceVerify::empty().expect("Should accept empty"); @@ -2794,7 +2804,7 @@ mod test { ] { let mut input = base_input.clone(); input.sequence = tx_sequence; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Shouldn't fail"); vm.dstack = vec![sequence.clone()]; match code.execute(&mut vm) { diff --git a/crypto/txscript/src/standard/multisig.rs b/crypto/txscript/src/standard/multisig.rs index 79c74c7b37..cbd9dbe6da 100644 --- a/crypto/txscript/src/standard/multisig.rs +++ b/crypto/txscript/src/standard/multisig.rs @@ -74,7 +74,7 @@ mod tests { use core::str::FromStr; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, subnets::SubnetworkId, @@ -154,11 +154,11 @@ mod tests { }]; let mut tx = MutableTransaction::with_entries(tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let sig_hash = if !is_ecdsa { - calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) } else { - calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) }; let 
msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let signatures: Vec<_> = inputs @@ -184,7 +184,7 @@ mod tests { let (input, entry) = tx.populated_inputs().next().unwrap(); let cache = Cache::new(10_000); - let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &mut reused_values, &cache).unwrap(); + let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache).unwrap(); assert_eq!(engine.execute().is_ok(), is_ok); } #[test] diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index e759a9e50c..060677a1e5 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -2,6 +2,7 @@ use crate::mempool::{ errors::{NonStandardError, NonStandardResult}, Mempool, }; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::{ constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI}, mass, @@ -114,7 +115,7 @@ impl Mempool { /// It is exposed by [MiningManager] for use by transaction generators and wallets. pub(crate) fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { // Unspendable outputs are considered dust. - if is_unspendable::(transaction_output.script_public_key.script()) { + if is_unspendable::(transaction_output.script_public_key.script()) { return true; } @@ -175,7 +176,6 @@ impl Mempool { if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { return Err(NonStandardError::RejectContextualMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } - for (i, input) in transaction.tx.inputs.iter().enumerate() { // It is safe to elide existence and index checks here since // they have already been checked prior to calling this @@ -188,7 +188,10 @@ impl Mempool { ScriptClass::PubKey => {} ScriptClass::PubKeyECDSA => {} ScriptClass::ScriptHash => { - get_sig_op_count::(&input.signature_script, &entry.script_public_key); + get_sig_op_count::( + &input.signature_script, + &entry.script_public_key, + ); let num_sig_ops = 1; if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS { return Err(NonStandardError::RejectSignatureCount(transaction_id, i, num_sig_ops, MAX_STANDARD_P2SH_SIG_OPS)); diff --git a/wallet/core/src/account/pskb.rs b/wallet/core/src/account/pskb.rs index e71d7e4796..fad6bdb4ab 100644 --- a/wallet/core/src/account/pskb.rs +++ b/wallet/core/src/account/pskb.rs @@ -9,7 +9,7 @@ use crate::tx::PaymentOutputs; use futures::stream; use kaspa_bip32::{DerivationPath, KeyFingerprint, PrivateKey}; use kaspa_consensus_client::UtxoEntry as ClientUTXO; -use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; use kaspa_consensus_core::tx::VerifiableTransaction; use kaspa_consensus_core::tx::{TransactionInput, UtxoEntry}; use kaspa_txscript::extract_script_pub_key_address; @@ -160,7 +160,7 @@ pub async fn pskb_signer_for_address( key_fingerprint: KeyFingerprint, ) -> Result { let mut signed_bundle = Bundle::new(); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); // If set, sign-for address is used for signing. // Else, all addresses from inputs are. 
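The hunks in this file and the surrounding ones all make the same mechanical change: a `SigHashReusedValues` threaded around as `&mut` becomes a `SigHashReusedValuesUnsync` created once and handed out by shared reference, so lazily computed sighash components are cached through interior mutability instead of an exclusive borrow. A minimal sketch of the resulting call pattern, assuming the `kaspa_consensus_core` and `kaspa_txscript` items exactly as imported in these diffs (it will not compile outside this workspace):

    use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync};
    use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL;
    use kaspa_consensus_core::tx::VerifiableTransaction;
    use kaspa_txscript::{caches::Cache, TxScriptEngine};

    fn check_inputs(tx: &impl VerifiableTransaction) {
        // No `mut` binding: the unsync variant mutates its cache behind a shared reference.
        let reused_values = SigHashReusedValuesUnsync::new();
        let cache = Cache::new(10_000);
        for (idx, (input, entry)) in tx.populated_inputs().enumerate() {
            // The same `&reused_values` serves both sighash calculation and script execution.
            let _sig_hash = calc_schnorr_signature_hash(tx, idx, SIG_HASH_ALL, &reused_values);
            TxScriptEngine::from_transaction_input(tx, input, idx, entry, &reused_values, &cache)
                .and_then(|mut vm| vm.execute())
                .expect("input script should verify");
        }
    }

The shared borrow is also why the `sign` closures in pskb.rs and the multisig example below lose their `mut`: they no longer capture the reuse cache mutably.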
@@ -186,7 +186,7 @@ pub async fn pskb_signer_for_address( for pskt_inner in bundle.iter().cloned() { let pskt: PSKT = PSKT::from(pskt_inner); - let mut sign = |signer_pskt: PSKT| { + let sign = |signer_pskt: PSKT| { signer_pskt .pass_signature_sync(|tx, sighash| -> Result, String> { tx.tx @@ -194,7 +194,7 @@ pub async fn pskb_signer_for_address( .iter() .enumerate() .map(|(idx, _input)| { - let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); // When address represents a locked UTXO, no private key is available. diff --git a/wallet/pskt/examples/multisig.rs b/wallet/pskt/examples/multisig.rs index fb011402fb..7a9ca190e5 100644 --- a/wallet/pskt/examples/multisig.rs +++ b/wallet/pskt/examples/multisig.rs @@ -1,5 +1,5 @@ use kaspa_consensus_core::{ - hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, tx::{TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_txscript::{multisig_redeem_script, opcodes::codes::OpData65, pay_to_script_hash_script, script_builder::ScriptBuilder}; @@ -51,8 +51,8 @@ fn main() { println!("Serialized after setting sequence: {}", ser_updated); let signer_pskt: PSKT = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); - let mut reused_values = SigHashReusedValues::new(); - let mut sign = |signer_pskt: PSKT, kp: &Keypair| { + let reused_values = SigHashReusedValuesUnsync::new(); + let sign = |signer_pskt: PSKT, kp: &Keypair| { signer_pskt .pass_signature_sync(|tx, sighash| -> Result, String> { let tx = dbg!(tx); @@ -61,7 +61,7 @@ fn main() { .iter() .enumerate() .map(|(idx, _input)| { - let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); Ok(SignInputOk { signature: Signature::Schnorr(kp.sign_schnorr(msg)), diff --git a/wallet/pskt/src/pskt.rs b/wallet/pskt/src/pskt.rs index 73f87a628f..93c16ccc85 100644 --- a/wallet/pskt/src/pskt.rs +++ b/wallet/pskt/src/pskt.rs @@ -3,6 +3,7 @@ //! 
use kaspa_bip32::{secp256k1, DerivationPath, KeyFingerprint}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use serde::{Deserialize, Serialize}; use serde_repr::{Deserialize_repr, Serialize_repr}; use std::{collections::BTreeMap, fmt::Display, fmt::Formatter, future::Future, marker::PhantomData, ops::Deref}; @@ -14,7 +15,7 @@ pub use crate::output::{Output, OutputBuilder}; pub use crate::role::{Combiner, Constructor, Creator, Extractor, Finalizer, Signer, Updater}; use kaspa_consensus_core::tx::UtxoEntry; use kaspa_consensus_core::{ - hashing::{sighash::SigHashReusedValues, sighash_type::SigHashType}, + hashing::sighash_type::SigHashType, subnets::SUBNETWORK_ID_NATIVE, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, }; @@ -432,10 +433,10 @@ impl PSKT { { let tx = tx.as_verifiable(); let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| { - TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &mut reused_values, &cache)?.execute()?; + TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache)?.execute()?; >::Ok(()) })?; } From c59a0d1e7d54b732d5626fb0ef56fcb76eb98c62 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sat, 12 Oct 2024 12:32:55 -0600 Subject: [PATCH 15/31] Parallelize MuHash calculations (#575) * Parallelize MuHash calculations MuHash calculations are additive and can be done in chunks then later combined * Reimplement validate tx with muhash as a separate fn * Use smallvec for muhash parallel Co-authored-by: Michael Sutton * Add independent rayon order test * Filter some data * Use tuple_windows for test iter --------- Co-authored-by: Michael Sutton --- consensus/src/consensus/mod.rs | 19 +++- .../virtual_processor/utxo_validation.rs | 96 ++++++++++++++++++- 2 files changed, 110 insertions(+), 5 deletions(-) diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 1731729a32..fd352fe28c 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -81,6 +81,7 @@ use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_txscript::caches::TxScriptCacheCounters; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::{ cmp::Reverse, @@ -771,9 +772,21 @@ impl ConsensusApi for Consensus { fn append_imported_pruning_point_utxos(&self, utxoset_chunk: &[(TransactionOutpoint, UtxoEntry)], current_multiset: &mut MuHash) { let mut pruning_utxoset_write = self.pruning_utxoset_stores.write(); pruning_utxoset_write.utxo_set.write_many(utxoset_chunk).unwrap(); - for (outpoint, entry) in utxoset_chunk { - current_multiset.add_utxo(outpoint, entry); - } + + // Parallelize processing + let inner_multiset = utxoset_chunk + .par_iter() + .map(|(outpoint, entry)| { + let mut inner_multiset = MuHash::new(); + inner_multiset.add_utxo(outpoint, entry); + inner_multiset + }) + .reduce(MuHash::new, |mut a, b| { + a.combine(&b); + a + }); + + current_multiset.combine(&inner_multiset); } fn import_pruning_point_utxo_set(&self, new_pruning_point: Hash, imported_utxo_multiset: MuHash) -> PruningImportResult<()> { diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 
306f81446c..454722dca4 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -31,6 +31,7 @@ use kaspa_muhash::MuHash; use kaspa_utils::refs::Refs; use rayon::prelude::*; +use smallvec::{smallvec, SmallVec}; use std::{iter::once, ops::Deref}; /// A context for processing the UTXO state of a block with respect to its selected parent. @@ -95,12 +96,14 @@ impl VirtualStateProcessor { // No need to fully validate selected parent transactions since selected parent txs were already validated // as part of selected parent UTXO state verification with the exact same UTXO context. let validation_flags = if is_selected_parent { TxValidationFlags::SkipScriptChecks } else { TxValidationFlags::Full }; - let validated_transactions = self.validate_transactions_in_parallel(&txs, &composed_view, pov_daa_score, validation_flags); + let (validated_transactions, inner_multiset) = + self.validate_transactions_with_muhash_in_parallel(&txs, &composed_view, pov_daa_score, validation_flags); + + ctx.multiset_hash.combine(&inner_multiset); let mut block_fee = 0u64; for (validated_tx, _) in validated_transactions.iter() { ctx.mergeset_diff.add_transaction(validated_tx, pov_daa_score).unwrap(); - ctx.multiset_hash.add_transaction(validated_tx, pov_daa_score); ctx.accepted_tx_ids.push(validated_tx.id()); block_fee += validated_tx.calculated_fee; } @@ -229,6 +232,38 @@ impl VirtualStateProcessor { }) } + /// Same as validate_transactions_in_parallel except during the iteration this will also + /// calculate the muhash in parallel for valid transactions + pub(crate) fn validate_transactions_with_muhash_in_parallel<'a, V: UtxoView + Sync>( + &self, + txs: &'a Vec, + utxo_view: &V, + pov_daa_score: u64, + flags: TxValidationFlags, + ) -> (SmallVec<[(ValidatedTransaction<'a>, u32); 2]>, MuHash) { + self.thread_pool.install(|| { + txs + .par_iter() // We can do this in parallel without complications since block body validation already ensured + // that all txs within each block are independent + .enumerate() + .skip(1) // Skip the coinbase tx. 
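+ // Each item surviving validation below is mapped to a singleton smallvec
+ // [(tx, index)] paired with that tx's own MuHash. The reduce then concatenates
+ // the vectors (rayon's reduce preserves the input order of sub-results, as the
+ // test_rayon_reduce_retains_order test added below asserts) and combines the
+ // hashes, which is sound because MuHash combination is additive.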
+ .filter_map(|(i, tx)| self.validate_transaction_in_utxo_context(tx, &utxo_view, pov_daa_score, flags).ok().map(|vtx| { + let mut mh = MuHash::new(); + mh.add_transaction(&vtx, pov_daa_score); + (smallvec![(vtx, i as u32)], mh) + } + )) + .reduce( + || (smallvec![], MuHash::new()), + |mut a, mut b| { + a.0.append(&mut b.0); + a.1.combine(&b.1); + a + }, + ) + }) + } + /// Attempts to populate the transaction with UTXO entries and performs all utxo-related tx validations pub(super) fn validate_transaction_in_utxo_context<'a>( &self, @@ -318,3 +353,60 @@ impl VirtualStateProcessor { Ok(()) } } + +#[cfg(test)] +mod tests { + use itertools::Itertools; + + use super::*; + + #[test] + fn test_rayon_reduce_retains_order() { + // this is an independent test to replicate the behavior of + // validate_txs_in_parallel and validate_txs_with_muhash_in_parallel + // and assert that the order of data is retained when doing par_iter + let data: Vec = (1..=1000).collect(); + + let collected: Vec = data + .par_iter() + .filter_map(|a| { + let chance: f64 = rand::random(); + if chance < 0.05 { + return None; + } + Some(*a) + }) + .collect(); + + println!("collected len: {}", collected.len()); + + collected.iter().tuple_windows().for_each(|(prev, curr)| { + // Data was originally sorted, so we check if they remain sorted after filtering + assert!(prev < curr, "expected {} < {} if original sort was preserved", prev, curr); + }); + + let reduced: SmallVec<[u16; 2]> = data + .par_iter() + .filter_map(|a: &u16| { + let chance: f64 = rand::random(); + if chance < 0.05 { + return None; + } + Some(smallvec![*a]) + }) + .reduce( + || smallvec![], + |mut arr, mut curr_data| { + arr.append(&mut curr_data); + arr + }, + ); + + println!("reduced len: {}", reduced.len()); + + reduced.iter().tuple_windows().for_each(|(prev, curr)| { + // Data was originally sorted, so we check if they remain sorted after filtering + assert!(prev < curr, "expected {} < {} if original sort was preserved", prev, curr); + }); + } +} From 0df2de50c442304724a6031bb0a48193c57a95ab Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 13 Oct 2024 20:54:13 +0300 Subject: [PATCH 16/31] Muhash parallel reduce -- optimize U3072 mul when LHS = one (#581) * semantic: add `from` ext methods * muhash from txs benchmark * optimization: in u3072 mul test if lhs is one * extract `parallelism_in_power_steps` * comment --- consensus/Cargo.toml | 2 +- consensus/benches/check_scripts.rs | 4 +- consensus/benches/hash_benchmarks.rs | 15 ----- consensus/benches/parallel_muhash.rs | 66 +++++++++++++++++++ consensus/core/src/muhash.rs | 14 ++++ consensus/src/consensus/mod.rs | 10 +-- .../virtual_processor/utxo_validation.rs | 3 +- crypto/muhash/src/u3072.rs | 10 +++ utils/src/iter.rs | 6 ++ 9 files changed, 102 insertions(+), 28 deletions(-) delete mode 100644 consensus/benches/hash_benchmarks.rs create mode 100644 consensus/benches/parallel_muhash.rs diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index b9a183ea8c..443e591c8a 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -57,7 +57,7 @@ kaspa-txscript-errors.workspace = true kaspa-addresses.workspace = true [[bench]] -name = "hash_benchmarks" +name = "parallel_muhash" harness = false [[bench]] diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs index d65ac63626..4a596da1b2 100644 --- a/consensus/benches/check_scripts.rs +++ b/consensus/benches/check_scripts.rs @@ -9,9 +9,9 @@ use kaspa_consensus_core::subnets::SubnetworkId; use 
kaspa_consensus_core::tx::{MutableTransaction, Transaction, TransactionInput, TransactionOutpoint, UtxoEntry}; use kaspa_txscript::caches::Cache; use kaspa_txscript::pay_to_address_script; +use kaspa_utils::iter::parallelism_in_power_steps; use rand::{thread_rng, Rng}; use secp256k1::Keypair; -use std::thread::available_parallelism; // You may need to add more detailed mocks depending on your actual code. fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec) { @@ -98,7 +98,7 @@ fn benchmark_check_scripts(c: &mut Criterion) { }); // Iterate powers of two up to available parallelism - for i in (1..=(available_parallelism().unwrap().get() as f64).log2().ceil() as u32).map(|x| 2u32.pow(x) as usize) { + for i in parallelism_in_power_steps() { if inputs_count >= i { group.bench_function(format!("rayon, custom thread pool, thread count {i}"), |b| { let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); diff --git a/consensus/benches/hash_benchmarks.rs b/consensus/benches/hash_benchmarks.rs deleted file mode 100644 index 8ba6836b8d..0000000000 --- a/consensus/benches/hash_benchmarks.rs +++ /dev/null @@ -1,15 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use std::str::FromStr; - -use kaspa_hashes::Hash; - -/// Placeholder for actual benchmarks -pub fn hash_benchmark(c: &mut Criterion) { - c.bench_function("Hash::from_str", |b| { - let hash_str = "8e40af02265360d59f4ecf9ae9ebf8f00a3118408f5a9cdcbcc9c0f93642f3af"; - b.iter(|| Hash::from_str(black_box(hash_str))) - }); -} - -criterion_group!(benches, hash_benchmark); -criterion_main!(benches); diff --git a/consensus/benches/parallel_muhash.rs b/consensus/benches/parallel_muhash.rs new file mode 100644 index 0000000000..99ab5b6c3a --- /dev/null +++ b/consensus/benches/parallel_muhash.rs @@ -0,0 +1,66 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use itertools::Itertools; +use kaspa_consensus_core::{ + muhash::MuHashExtensions, + subnets::SUBNETWORK_ID_NATIVE, + tx::{ScriptPublicKey, SignableTransaction, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry}, +}; +use kaspa_hashes::TransactionID; +use kaspa_muhash::MuHash; +use kaspa_utils::iter::parallelism_in_power_steps; +use rayon::prelude::*; + +fn generate_transaction(ins: usize, outs: usize, randomness: u64) -> SignableTransaction { + let mut tx = Transaction::new(0, vec![], vec![], 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + let mut entries = vec![]; + for i in 0..ins { + let mut hasher = TransactionID::new(); + hasher.write(i.to_le_bytes()); + hasher.write(randomness.to_le_bytes()); + let input = TransactionInput::new(TransactionOutpoint::new(hasher.finalize(), 0), vec![10; 66], 0, 1); + let entry = UtxoEntry::new(22222222, ScriptPublicKey::from_vec(0, vec![99; 34]), 23456, false); + tx.inputs.push(input); + entries.push(entry); + } + for _ in 0..outs { + let output = TransactionOutput::new(23456, ScriptPublicKey::from_vec(0, vec![101; 34])); + tx.outputs.push(output); + } + tx.finalize(); + SignableTransaction::with_entries(tx, entries) +} + +pub fn parallel_muhash_benchmark(c: &mut Criterion) { + let mut group = c.benchmark_group("muhash txs"); + let txs = (0..256).map(|i| generate_transaction(2, 2, i)).collect_vec(); + group.bench_function("seq", |b| { + b.iter(|| { + let mut mh = MuHash::new(); + for tx in txs.iter() { + mh.add_transaction(&tx.as_verifiable(), 222); + } + black_box(mh) + }) + }); + + for threads in parallelism_in_power_steps() { + 
group.bench_function(format!("par {threads}"), |b| { + let pool = rayon::ThreadPoolBuilder::new().num_threads(threads).build().unwrap(); + b.iter(|| { + pool.install(|| { + let mh = + txs.par_iter().map(|tx| MuHash::from_transaction(&tx.as_verifiable(), 222)).reduce(MuHash::new, |mut a, b| { + a.combine(&b); + a + }); + black_box(mh) + }) + }) + }); + } + + group.finish(); +} + +criterion_group!(benches, parallel_muhash_benchmark); +criterion_main!(benches); diff --git a/consensus/core/src/muhash.rs b/consensus/core/src/muhash.rs index 3782f855c0..0286596eba 100644 --- a/consensus/core/src/muhash.rs +++ b/consensus/core/src/muhash.rs @@ -8,6 +8,8 @@ use kaspa_muhash::MuHash; pub trait MuHashExtensions { fn add_transaction(&mut self, tx: &impl VerifiableTransaction, block_daa_score: u64); fn add_utxo(&mut self, outpoint: &TransactionOutpoint, entry: &UtxoEntry); + fn from_transaction(tx: &impl VerifiableTransaction, block_daa_score: u64) -> Self; + fn from_utxo(outpoint: &TransactionOutpoint, entry: &UtxoEntry) -> Self; } impl MuHashExtensions for MuHash { @@ -30,6 +32,18 @@ impl MuHashExtensions for MuHash { write_utxo(&mut writer, entry, outpoint); writer.finalize(); } + + fn from_transaction(tx: &impl VerifiableTransaction, block_daa_score: u64) -> Self { + let mut mh = Self::new(); + mh.add_transaction(tx, block_daa_score); + mh + } + + fn from_utxo(outpoint: &TransactionOutpoint, entry: &UtxoEntry) -> Self { + let mut mh = Self::new(); + mh.add_utxo(outpoint, entry); + mh + } } fn write_utxo(writer: &mut impl HasherBase, entry: &UtxoEntry, outpoint: &TransactionOutpoint) { diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index fd352fe28c..d9e4ac7d14 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -774,14 +774,8 @@ impl ConsensusApi for Consensus { pruning_utxoset_write.utxo_set.write_many(utxoset_chunk).unwrap(); // Parallelize processing - let inner_multiset = utxoset_chunk - .par_iter() - .map(|(outpoint, entry)| { - let mut inner_multiset = MuHash::new(); - inner_multiset.add_utxo(outpoint, entry); - inner_multiset - }) - .reduce(MuHash::new, |mut a, b| { + let inner_multiset = + utxoset_chunk.par_iter().map(|(outpoint, entry)| MuHash::from_utxo(outpoint, entry)).reduce(MuHash::new, |mut a, b| { a.combine(&b); a }); diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 454722dca4..6516888188 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -248,8 +248,7 @@ impl VirtualStateProcessor { .enumerate() .skip(1) // Skip the coinbase tx. 
.filter_map(|(i, tx)| self.validate_transaction_in_utxo_context(tx, &utxo_view, pov_daa_score, flags).ok().map(|vtx| { - let mut mh = MuHash::new(); - mh.add_transaction(&vtx, pov_daa_score); + let mh = MuHash::from_transaction(&vtx, pov_daa_score); (smallvec![(vtx, i as u32)], mh) } )) diff --git a/crypto/muhash/src/u3072.rs b/crypto/muhash/src/u3072.rs index 82021eb88d..fae82b8cf1 100644 --- a/crypto/muhash/src/u3072.rs +++ b/crypto/muhash/src/u3072.rs @@ -88,6 +88,16 @@ impl U3072 { } fn mul(&mut self, other: &U3072) { + /* + Optimization: short-circuit when LHS is one + - This case is especially frequent during parallel reduce operation where the identity (one) is used for each sub-computation (at the LHS) + - If self ≠ one, the comparison should exit early, otherwise if they are equal -- we gain much more than we lose + - Benchmarks show that general performance remains the same while parallel reduction gains ~35% + */ + if *self == Self::one() { + *self = *other; + return; + } let (mut carry_low, mut carry_high, mut carry_highest) = (0, 0, 0); let mut tmp = Self::one(); diff --git a/utils/src/iter.rs b/utils/src/iter.rs index 58a61d7707..3c4c98c64a 100644 --- a/utils/src/iter.rs +++ b/utils/src/iter.rs @@ -48,3 +48,9 @@ where self.inner.clone().fmt(f) } } + +/// Returns an iterator over powers of two up to (the rounded up) available parallelism: `2, 4, 8, ..., 2^(available_parallelism.log2().ceil())`, +/// i.e., for `std::thread::available_parallelism = 15` the function will return `2, 4, 8, 16` +pub fn parallelism_in_power_steps() -> impl Iterator { + (1..=(std::thread::available_parallelism().unwrap().get() as f64).log2().ceil() as u32).map(|x| 2usize.pow(x)) +} From a40efbb6900333f072043f30bf9c3de2c34ec143 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Tue, 22 Oct 2024 12:40:20 +0300 Subject: [PATCH 17/31] Rust 1.82 fixes + mempool std sig op count check (#583) * rust 1.82 fixes * sig op count std check --- cli/src/cli.rs | 4 ++-- crypto/txscript/src/lib.rs | 1 + mining/errors/src/mempool.rs | 2 +- mining/src/mempool/check_transaction_standard.rs | 5 ++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 5ca1997ea3..a32956740a 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1016,7 +1016,7 @@ mod panic_handler { fn stack(error: &Error) -> String; } - pub fn process(info: &std::panic::PanicInfo) -> String { + pub fn process(info: &std::panic::PanicHookInfo) -> String { let mut msg = info.to_string(); // Add the error stack to our message. 
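Aside on the first half of this commit: the two cli.rs hunks only track the standard library's rename of the panic-hook argument type to `std::panic::PanicHookInfo` (the commit title attributes this to the Rust 1.82 toolchain). A standalone, std-only illustration of the new spelling, independent of the KaspaCli wiring shown here:

    fn install_panic_hook() {
        std::panic::set_hook(Box::new(|info: &std::panic::PanicHookInfo| {
            // `PanicHookInfo` implements Display, so this prints the usual
            // "panicked at <location>: <message>" text, much as `process`
            // above does via `info.to_string()`.
            eprintln!("panic: {info}");
        }));
    }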
@@ -1053,7 +1053,7 @@ mod panic_handler { impl KaspaCli { pub fn init_panic_hook(self: &Arc) { let this = self.clone(); - let handler = move |info: &std::panic::PanicInfo| { + let handler = move |info: &std::panic::PanicHookInfo| { let msg = panic_handler::process(info); this.term().writeln(msg.crlf()); panic_handler::console_error(msg); diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index f177962721..5fed84328d 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -93,6 +93,7 @@ fn parse_script( script.iter().batching(|it| deserialize_next_opcode(it)) } +#[must_use] pub fn get_sig_op_count( signature_script: &[u8], prev_script_public_key: &ScriptPublicKey, diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index 319aaa4845..12416be678 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -131,7 +131,7 @@ pub enum NonStandardError { RejectInsufficientFee(TransactionId, u64, u64), #[error("transaction input #{1} has {2} signature operations which is more than the allowed max amount of {3}")] - RejectSignatureCount(TransactionId, usize, u8, u8), + RejectSignatureCount(TransactionId, usize, u64, u8), } impl NonStandardError { diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index 060677a1e5..ef4d3fb9ee 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -188,12 +188,11 @@ impl Mempool { ScriptClass::PubKey => {} ScriptClass::PubKeyECDSA => {} ScriptClass::ScriptHash => { - get_sig_op_count::( + let num_sig_ops = get_sig_op_count::( &input.signature_script, &entry.script_public_key, ); - let num_sig_ops = 1; - if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS { + if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS as u64 { return Err(NonStandardError::RejectSignatureCount(transaction_id, i, num_sig_ops, MAX_STANDARD_P2SH_SIG_OPS)); } } From aac16a9244e558f719e37663e587838664248466 Mon Sep 17 00:00:00 2001 From: Romain Billot Date: Tue, 22 Oct 2024 11:41:22 +0200 Subject: [PATCH 18/31] typo(cli/utils): kaspa wording (#582) Co-authored-by: Michael Sutton --- cli/src/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/src/utils.rs b/cli/src/utils.rs index 3e1d0ddb15..e52581b4cc 100644 --- a/cli/src/utils.rs +++ b/cli/src/utils.rs @@ -8,7 +8,7 @@ pub fn try_parse_required_nonzero_kaspa_as_sompi_u64(kasp let sompi_amount = kaspa_amount .to_string() .parse::() - .map_err(|_| Error::custom(format!("Supplied Kasapa amount is not valid: '{kaspa_amount}'")))? + .map_err(|_| Error::custom(format!("Supplied Kaspa amount is not valid: '{kaspa_amount}'")))? * SOMPI_PER_KASPA as f64; if sompi_amount < 0.0 { Err(Error::custom("Supplied Kaspa amount is not valid: '{kaspa_amount}'")) From 3a2bcbb8a39093b169bfe576e03f1c7fa5064678 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 29 Oct 2024 13:13:45 -0600 Subject: [PATCH 19/31] On-demand calculation for Ghostdag for Higher Levels (#494) * Refactor pruning proof validation to many functions Co-authored-by: Ori Newman * Use blue score as work for higher levels Co-authored-by: Ori Newman * Remove pruning processor dependency on gd managers Co-authored-by: Ori Newman * Consistency renaming Co-authored-by: Ori Newman * Update db version Co-authored-by: Ori Newman * GD Optimizations Co-authored-by: Ori Newman * Remove remnant of old impl. 
optimize db prefixes * Ensure parents are in relations; Add comments. apply_proof only inserts parent entries for a header from the proof into the relations store for a level if there was GD data in the old stores for that header. This adds a check to filter out parent records not in the relations store * Match depth check to block_at_depth logic * Use singular GD store for header processing * Relax the panic to warn when finished_headers and couldn't find sufficient root. This happens when there's not enough headers in the pruning proof but it satisfies validation * Error handling for gd on higher levels. relations.get_parents on GD gets extra parents that aren't in the current GD store, so get_blue_work throws an error. Next, ORIGIN was missing from the GD, so add that * remove using deeper requirements in lower levels * Fix missed references to self.ghostdag_stores in validate_pruning_point_proof * Refactoring for single GD header processing * Add assertion to check root vs old_root * Lint fix current_dag_level * Keep DB Version at 3. The new prefixes added are compatible with the old version. We don't want to trigger a db delete with this change * Cleanup apply_proof logic and handle more ghostdag_stores logic * remove simpa changes * Remove rewriting origin to primary GD. It's already on there * More refactoring to use single GD store/manager * Lint fixes * warn to trace for common retry * Address initial comments * Remove "primary" in ghostdag store/manager references * Add small safety margin to proof at level 0. This prevents the case where new root is an anticone of old root * Revert to only do proof rebuilding on sanity check * Proper "better" proof check * Update comment on find_selected_parent_header_at_level * Re-apply missed comment * Implement db upgrade logic from 3 to 4 * Explain further the workaround for GD ordering.rs * Minor update to Display of TempGD keys * Various fixes - Keep using old root to minimize proof size. Old root is calculated using the temporary gd stores - fix the off-by-one in block_at_depth and chain_up_to_depth - revert the temp fix to sync with the off-by-one * Revert "Various fixes". This reverts commit bc56e65d5dd93d17c00e12e9f2c05e0a924e24b5. This experimental commit requires a bit more thinking to apply, and optimization can be deferred. 
* Revert better proof check Recreates the GD stores for the current consensus by checking existing proof * Fix: use cc gd store * When building pruning point proof ghostdag data, ignore blocks before the root * Add trusted blocks to all relevant levels during apply_proof As opposed to applying only to level 0 * Calculate headers estimate in init proof stores * Explain finished headers logic Add back the panic if we couldn't find the required block and our headers are done Add explanation in comment for why trying anyway if finished_headers is acceptable * clarify comment * Rename old_root to depth_based_root explain logic for the two root calculation * More merge fixes * Refactor relations services into self * Use blue_work for find_selected_parent_header_at_level * Comment fixes and small refactor * Revert rename to old root * Lint fix from merged code * Some cleanup - use BlueWorkType - fix some comments * remove last reference to ghostdag_primary_* * Cleaner find_selected_parent_header_at_level Co-authored-by: Michael Sutton * Refactor for better readability and add more docs * Smaller safety margin for all * Lint and logic fix * Reduce loop depth increase on level proof retries Co-authored-by: Michael Sutton * Update consensus/src/processes/pruning_proof/mod.rs Co-authored-by: Michael Sutton * Comment cleanup * Remove unnecessary clone Co-authored-by: Michael Sutton * Rename genesis_hash to root; Remove redundant filter * Cleaner reachability_stores type Co-authored-by: Michael Sutton * Change failed to find sufficient root log to debug * Bump node version to 0.15.3 * A few minor leftovers --------- Co-authored-by: Ori Newman Co-authored-by: Michael Sutton Co-authored-by: Michael Sutton --- Cargo.lock | 118 +-- Cargo.toml | 112 +-- consensus/src/consensus/factory.rs | 19 +- consensus/src/consensus/mod.rs | 14 +- consensus/src/consensus/services.rs | 44 +- consensus/src/consensus/storage.rs | 25 +- consensus/src/consensus/test_consensus.rs | 6 +- consensus/src/model/stores/ghostdag.rs | 21 + .../pipeline/header_processor/processor.rs | 47 +- .../pipeline/pruning_processor/processor.rs | 16 +- .../pipeline/virtual_processor/processor.rs | 18 +- .../virtual_processor/utxo_validation.rs | 2 +- consensus/src/processes/ghostdag/protocol.rs | 23 +- consensus/src/processes/pruning_proof/mod.rs | 707 +++++++++++++++--- database/src/key.rs | 2 + database/src/registry.rs | 4 + kaspad/Cargo.toml | 2 + kaspad/src/daemon.rs | 111 ++- simpa/src/main.rs | 12 +- 19 files changed, 956 insertions(+), 347 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a0db546302..a951993e90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2265,7 +2265,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "criterion", @@ -2282,7 +2282,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "igd-next", @@ -2304,14 +2304,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.15.2" +version = "0.15.3" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "bs58", @@ -2338,7 +2338,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2385,7 +2385,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "duration-string", "futures-util", @@ 
-2402,7 +2402,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -2446,7 +2446,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ahash", "cfg-if 1.0.0", @@ -2474,7 +2474,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-trait", @@ -2513,7 +2513,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -2532,7 +2532,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2556,7 +2556,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "duration-string", "futures", @@ -2574,7 +2574,7 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2592,7 +2592,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2614,7 +2614,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "enum-primitive-derive", @@ -2636,7 +2636,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2668,7 +2668,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2697,7 +2697,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2733,7 +2733,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -2754,7 +2754,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2773,7 +2773,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2801,7 +2801,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "criterion", @@ -2822,14 +2822,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2845,7 +2845,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "futures-util", @@ -2872,7 +2872,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -2880,7 +2880,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "kaspa-hashes", @@ -2893,7 +2893,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.15.2" +version = "0.15.3" dependencies = [ 
"async-channel 2.3.1", "async-trait", @@ -2929,7 +2929,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "chrono", @@ -2960,7 +2960,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "ctrlc", @@ -2991,7 +2991,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-core", "log", @@ -3003,7 +3003,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "js-sys", @@ -3019,7 +3019,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3061,7 +3061,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3073,7 +3073,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "kaspa-addresses", @@ -3102,7 +3102,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3162,7 +3162,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -3194,7 +3194,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "secp256k1", "thiserror", @@ -3202,7 +3202,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bytes", "cfg-if 1.0.0", @@ -3254,7 +3254,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.15.2" +version = "0.15.3" dependencies = [ "futures", "kaspa-consensus", @@ -3275,7 +3275,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-std", "async-trait", @@ -3287,7 +3287,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "js-sys", @@ -3301,7 +3301,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "aes", "ahash", @@ -3382,7 +3382,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -3415,7 +3415,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3428,7 +3428,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "derive_builder", @@ -3455,7 +3455,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3483,7 +3483,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "faster-hex", "hexplay", @@ -3494,7 +3494,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.15.2" +version = 
"0.15.3" dependencies = [ "async-std", "async-trait", @@ -3530,7 +3530,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ctrlc", "futures", @@ -3545,7 +3545,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "clap 4.5.19", @@ -3564,7 +3564,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -3592,7 +3592,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-simple-client-example" -version = "0.15.2" +version = "0.15.3" dependencies = [ "futures", "kaspa-rpc-core", @@ -3602,7 +3602,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "ahash", "async-std", @@ -3632,7 +3632,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -3640,6 +3640,7 @@ dependencies = [ "dhat", "dirs", "futures-util", + "itertools 0.13.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", @@ -3667,6 +3668,7 @@ dependencies = [ "num_cpus", "rand", "rayon", + "rocksdb", "serde", "serde_with", "tempfile", @@ -4972,7 +4974,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "clap 4.5.19", @@ -5385,7 +5387,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index dd5eb31320..7141101f9a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ members = [ [workspace.package] rust-version = "1.81.0" -version = "0.15.2" +version = "0.15.3" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -80,61 +80,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.15.2", path = "testing/integration" } -kaspa-addresses = { version = "0.15.2", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.15.2", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.15.2", path = "wallet/bip32" } -kaspa-cli = { version = "0.15.2", path = "cli" } -kaspa-connectionmanager = { version = "0.15.2", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.15.2", path = "consensus" } -kaspa-consensus-core = { version = "0.15.2", path = "consensus/core" } -kaspa-consensus-client = { version = "0.15.2", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.15.2", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.15.2", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.15.2", path = "components/consensusmanager" } -kaspa-core = { version = "0.15.2", path = "core" } -kaspa-daemon = { version = "0.15.2", path = "daemon" } -kaspa-database = { version = "0.15.2", path = "database" } -kaspa-grpc-client = { version = "0.15.2", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.15.2", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.15.2", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.15.2", path = "crypto/hashes" } -kaspa-index-core = { version = "0.15.2", path = "indexes/core" } -kaspa-index-processor = { version = "0.15.2", path = "indexes/processor" } -kaspa-math = { version = "0.15.2", path = "math" } -kaspa-merkle = { 
version = "0.15.2", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.15.2", path = "metrics/core" } -kaspa-mining = { version = "0.15.2", path = "mining" } -kaspa-mining-errors = { version = "0.15.2", path = "mining/errors" } -kaspa-muhash = { version = "0.15.2", path = "crypto/muhash" } -kaspa-notify = { version = "0.15.2", path = "notify" } -kaspa-p2p-flows = { version = "0.15.2", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.15.2", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.15.2", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.15.2", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.15.2", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.15.2", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.15.2", path = "rpc/service" } -kaspa-txscript = { version = "0.15.2", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.15.2", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.15.2", path = "utils" } -kaspa-utils-tower = { version = "0.15.2", path = "utils/tower" } -kaspa-utxoindex = { version = "0.15.2", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.15.2", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.15.2", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.15.2", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.15.2", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.15.2", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.15.2", path = "wallet/macros" } -kaspa-wasm = { version = "0.15.2", path = "wasm" } -kaspa-wasm-core = { version = "0.15.2", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.15.2", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.15.2", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.15.2", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.15.2", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.15.2", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.15.2", path = "kaspad" } -kaspa-alloc = { version = "0.15.2", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.15.3", path = "testing/integration" } +kaspa-addresses = { version = "0.15.3", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.15.3", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.15.3", path = "wallet/bip32" } +kaspa-cli = { version = "0.15.3", path = "cli" } +kaspa-connectionmanager = { version = "0.15.3", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.15.3", path = "consensus" } +kaspa-consensus-core = { version = "0.15.3", path = "consensus/core" } +kaspa-consensus-client = { version = "0.15.3", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.15.3", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.15.3", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.15.3", path = "components/consensusmanager" } +kaspa-core = { version = "0.15.3", path = "core" } +kaspa-daemon = { version = "0.15.3", path = "daemon" } +kaspa-database = { version = "0.15.3", path = "database" } +kaspa-grpc-client = { version = "0.15.3", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.15.3", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.15.3", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.15.3", path = "crypto/hashes" } +kaspa-index-core = { version = "0.15.3", path = "indexes/core" } +kaspa-index-processor = 
{ version = "0.15.3", path = "indexes/processor" } +kaspa-math = { version = "0.15.3", path = "math" } +kaspa-merkle = { version = "0.15.3", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.15.3", path = "metrics/core" } +kaspa-mining = { version = "0.15.3", path = "mining" } +kaspa-mining-errors = { version = "0.15.3", path = "mining/errors" } +kaspa-muhash = { version = "0.15.3", path = "crypto/muhash" } +kaspa-notify = { version = "0.15.3", path = "notify" } +kaspa-p2p-flows = { version = "0.15.3", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.15.3", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.15.3", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.15.3", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.15.3", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.15.3", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.15.3", path = "rpc/service" } +kaspa-txscript = { version = "0.15.3", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.15.3", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.15.3", path = "utils" } +kaspa-utils-tower = { version = "0.15.3", path = "utils/tower" } +kaspa-utxoindex = { version = "0.15.3", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.15.3", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.15.3", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.15.3", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.15.3", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.15.3", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.15.3", path = "wallet/macros" } +kaspa-wasm = { version = "0.15.3", path = "wasm" } +kaspa-wasm-core = { version = "0.15.3", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.15.3", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.15.3", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.15.3", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.15.3", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.15.3", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.15.3", path = "kaspad" } +kaspa-alloc = { version = "0.15.3", path = "utils/alloc" } # external aes = "0.8.3" diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c5..f8af5fb5a6 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { @@ -219,6 +219,23 @@ impl MultiConsensusManagementStore { } } + /// Returns the current version of this database + pub fn version(&self) -> StoreResult { + match self.metadata.read() { + Ok(data) => Ok(data.version), + Err(err) => Err(err), + } + } + + /// Set the database version to a different one + pub fn set_version(&mut self, version: u32) -> StoreResult<()> { + self.metadata.update(DirectDbWriter::new(&self.db), |mut data| { + data.version = version; + data + })?; + Ok(()) + } + pub fn should_upgrade(&self) -> StoreResult { match self.metadata.read() { Ok(data) => Ok(data.version != LATEST_DB_VERSION), diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index d9e4ac7d14..6909562aa1 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -243,7 +243,7 @@ impl Consensus { 
block_processors_pool, db.clone(), storage.statuses_store.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.block_transactions_store.clone(), storage.body_tips_store.clone(), @@ -500,7 +500,7 @@ impl ConsensusApi for Consensus { fn get_virtual_merge_depth_blue_work_threshold(&self) -> BlueWorkType { // PRUNE SAFETY: merge depth root is never close to being pruned (in terms of block depth) - self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_primary_store.get_blue_work(root).unwrap()) + self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_store.get_blue_work(root).unwrap()) } fn get_sink(&self) -> Hash { @@ -533,7 +533,7 @@ impl ConsensusApi for Consensus { for child in initial_children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -560,7 +560,7 @@ impl ConsensusApi for Consensus { for child in children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -909,7 +909,7 @@ impl ConsensusApi for Consensus { Some(BlockStatus::StatusInvalid) => return Err(ConsensusError::InvalidBlock(hash)), _ => {} }; - let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; + let ghostdag = self.ghostdag_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; Ok((&*ghostdag).into()) } @@ -985,7 +985,7 @@ impl ConsensusApi for Consensus { Ok(self .services .window_manager - .block_window(&self.ghostdag_primary_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) .unwrap() .deref() .iter() @@ -1024,7 +1024,7 @@ impl ConsensusApi for Consensus { match start_hash { Some(hash) => { self.validate_block_exists(hash)?; - let ghostdag_data = self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); // The selected parent header is used within to check for sampling activation, so we verify its existence first if !self.headers_store.has(ghostdag_data.selected_parent).unwrap() { return Err(ConsensusError::DifficultyError(DifficultyError::InsufficientWindowData(0))); diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 38e283a141..97c6d0b769 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,8 +53,7 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_managers: Arc>, - pub ghostdag_primary_manager: DbGhostdagManager, + pub ghostdag_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, pub pruning_proof_manager: Arc, @@ -82,13 +81,13 @@ impl ConsensusServices { let reachability_service = MTReachabilityService::new(storage.reachability_store.clone()); let dag_traversal_manager = DagTraversalManager::new( params.genesis.hash, - storage.ghostdag_primary_store.clone(), + 
storage.ghostdag_store.clone(), relations_service.clone(), reachability_service.clone(), ); let window_manager = DualWindowManager::new( ¶ms.genesis, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.daa_excluded_store.clone(), storage.block_window_cache_for_difficulty.clone(), @@ -110,27 +109,17 @@ impl ConsensusServices { params.genesis.hash, storage.depth_store.clone(), reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), ); - let ghostdag_managers = Arc::new( - storage - .ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - params.genesis.hash, - params.ghostdag_k, - ghostdag_store, - relations_services[level].clone(), - storage.headers_store.clone(), - reachability_service.clone(), - ) - }) - .collect_vec(), + let ghostdag_manager = GhostdagManager::new( + params.genesis.hash, + params.ghostdag_k, + storage.ghostdag_store.clone(), + relations_services[0].clone(), + storage.headers_store.clone(), + reachability_service.clone(), + false, ); - let ghostdag_primary_manager = ghostdag_managers[0].clone(); let coinbase_manager = CoinbaseManager::new( params.coinbase_payload_script_public_key_max_len, @@ -165,7 +154,7 @@ impl ConsensusServices { params.finality_depth, params.genesis.hash, reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.past_pruning_points_store.clone(), storage.headers_selected_tip_store.clone(), @@ -184,7 +173,7 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_managers.clone(), + ghostdag_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, @@ -199,7 +188,7 @@ impl ConsensusServices { params.mergeset_size_limit as usize, reachability_service.clone(), dag_traversal_manager.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.selected_chain_store.clone(), storage.headers_selected_tip_store.clone(), storage.pruning_point_store.clone(), @@ -213,8 +202,7 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_managers, - ghostdag_primary_manager, + ghostdag_manager, coinbase_manager, pruning_point_manager, pruning_proof_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 89a0f5e265..ad3b95d1b9 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,8 +50,7 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub ghostdag_stores: Arc>>, - pub ghostdag_primary_store: Arc, + pub ghostdag_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, pub past_pruning_points_store: Arc, @@ -193,19 +192,12 @@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_stores = Arc::new( - (0..=params.max_block_level) - .map(|level| { - Arc::new(DbGhostdagStore::new( - db.clone(), - level, - ghostdag_builder.downscale(level).build(), - ghostdag_compact_builder.downscale(level).build(), - )) - }) - .collect_vec(), - ); - let ghostdag_primary_store = ghostdag_stores[0].clone(); + let ghostdag_store = Arc::new(DbGhostdagStore::new( + db.clone(), + 0, + ghostdag_builder.downscale(0).build(), + ghostdag_compact_builder.downscale(0).build(), + )); let daa_excluded_store = 
Arc::new(DbDaaStore::new(db.clone(), daa_excluded_builder.build())); let headers_store = Arc::new(DbHeadersStore::new(db.clone(), headers_builder.build(), headers_compact_builder.build())); let depth_store = Arc::new(DbDepthStore::new(db.clone(), header_data_builder.build())); @@ -245,8 +237,7 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_stores, - ghostdag_primary_store, + ghostdag_store, pruning_point_store, headers_selected_tip_store, body_tips_store, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index a705d9ecca..472bdbd835 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -118,7 +118,7 @@ impl TestConsensus { pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { let mut header = header_from_precomputed_hash(hash, parents); - let ghostdag_data = self.consensus.services.ghostdag_primary_manager.ghostdag(header.direct_parents()); + let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents()); header.pruning_point = self .consensus .services @@ -201,7 +201,7 @@ impl TestConsensus { } pub fn ghostdag_store(&self) -> &Arc { - &self.consensus.ghostdag_primary_store + &self.consensus.ghostdag_store } pub fn reachability_store(&self) -> &Arc> { @@ -233,7 +233,7 @@ impl TestConsensus { } pub fn ghostdag_manager(&self) -> &DbGhostdagManager { - &self.consensus.services.ghostdag_primary_manager + &self.consensus.services.ghostdag_manager } } diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index bcf860b3a3..fd2600a1c4 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -270,6 +270,27 @@ impl DbGhostdagStore { } } + pub fn new_temp( + db: Arc, + level: BlockLevel, + cache_policy: CachePolicy, + compact_cache_policy: CachePolicy, + temp_index: u8, + ) -> Self { + assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); + let lvl_bytes = level.to_le_bytes(); + let temp_index_bytes = temp_index.to_le_bytes(); + let prefix = DatabaseStorePrefixes::TempGhostdag.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + let compact_prefix = + DatabaseStorePrefixes::TempGhostdagCompact.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_policy, prefix), + compact_access: CachedDbAccess::new(db, compact_cache_policy, compact_prefix), + } + } + pub fn clone_with_new_cache(&self, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { Self::new(Arc::clone(&self.db), self.level, cache_policy, compact_cache_policy) } diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 6c93b91d9c..4ecc761af1 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -55,7 +55,7 @@ pub struct HeaderProcessingContext { pub known_parents: Vec, // Staging data - pub ghostdag_data: Option>>, + pub ghostdag_data: Option>, pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this header. 
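An aside on the `new_temp` constructor added above: each temporary GHOSTDAG store namespaces its keys by chaining the store prefix with the level bytes and a per-retry index. A minimal sketch of that composition, reusing the `TempGhostdag = 40` value this patch adds to the registry (`temp_prefix` itself is a hypothetical helper, not codebase API):

```rust
// Sketch of the key-prefix layout used by `new_temp`. TEMP_GHOSTDAG mirrors
// the `TempGhostdag = 40` registry value added by this patch; `temp_prefix`
// is a hypothetical helper.
const TEMP_GHOSTDAG: u8 = 40;

fn temp_prefix(store_prefix: u8, level: u8, temp_index: u8) -> Vec<u8> {
    // store prefix ++ level (LE bytes) ++ temp index (LE bytes):
    // every level and every retry gets a disjoint key space in the temp DB
    std::iter::once(store_prefix).chain(level.to_le_bytes()).chain(temp_index.to_le_bytes()).collect()
}

fn main() {
    assert_eq!(temp_prefix(TEMP_GHOSTDAG, 3, 0), vec![40, 3, 0]);
    // Retries at the same level (the `tries` counter used later in this patch) never collide:
    assert_ne!(temp_prefix(TEMP_GHOSTDAG, 3, 0), temp_prefix(TEMP_GHOSTDAG, 3, 1));
}
```

Because the prefix embeds the retry index, a failed root search can start over with a fresh, disjoint key space in the same temp DB.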
/// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap()[0] + self.ghostdag_data.as_ref().unwrap() } } @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) reachability_relations_store: Arc>, - pub(super) ghostdag_stores: Arc>>, + pub(super) ghostdag_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_managers: Arc>, + pub(super) ghostdag_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_store: storage.ghostdag_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -344,18 +344,14 @@ impl HeaderProcessor { .collect_vec() } - /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) + /// Runs the GHOSTDAG algorithm and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { - let ghostdag_data = (0..=ctx.block_level as usize) - .map(|level| { - self.ghostdag_stores[level] - .get_data(ctx.hash) - .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) - }) - .collect_vec(); - - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); + let ghostdag_data = self + .ghostdag_store + .get_data(ctx.hash) + .unwrap_option() + .unwrap_or_else(|| Arc::new(self.ghostdag_manager.ghostdag(&ctx.known_parents[0]))); + self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -369,10 +365,8 @@ impl HeaderProcessor { // // Append-only stores: these require no lock and hence done first in order to reduce locking time // + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap(); - for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); - } if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); } @@ -393,8 +387,8 @@ impl HeaderProcessor { // time, and thus serializing this part will do no harm. 
However this should be benchmarked. The // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - let selected_parent = ghostdag_data[0].selected_parent; - let mut reachability_mergeset = ghostdag_data[0].unordered_mergeset_without_selected_parent(); + let selected_parent = ghostdag_data.selected_parent; + let mut reachability_mergeset = ghostdag_data.unordered_mergeset_without_selected_parent(); reachability::add_block(&mut staging, ctx.hash, selected_parent, &mut reachability_mergeset).unwrap(); // Non-append only stores need to use write locks. @@ -448,10 +442,8 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - for (level, datum) in ghostdag_data.iter().enumerate() { - // This data might have been already written when applying the pruning proof. - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); - } + // This data might have been already written when applying the pruning proof. + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -491,8 +483,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = - Some(self.ghostdag_managers.iter().map(|manager_by_level| Arc::new(manager_by_level.genesis_ghostdag_data())).collect()); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 35dc211d51..2de19c265d 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -2,7 +2,7 @@ use crate::{ consensus::{ - services::{ConsensusServices, DbGhostdagManager, DbParentsManager, DbPruningPointManager}, + services::{ConsensusServices, DbParentsManager, DbPruningPointManager}, storage::ConsensusStorage, }, model::{ @@ -69,7 +69,6 @@ pub struct PruningProcessor { // Managers and Services reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, pruning_point_manager: DbPruningPointManager, pruning_proof_manager: Arc, parents_manager: DbParentsManager, @@ -107,7 +106,6 @@ impl PruningProcessor { db, storage: storage.clone(), reachability_service: services.reachability_service.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), pruning_point_manager: services.pruning_point_manager.clone(), pruning_proof_manager: services.pruning_proof_manager.clone(), parents_manager: services.parents_manager.clone(), @@ -284,7 +282,7 @@ impl PruningProcessor { let mut batch = WriteBatch::default(); // At this point keep_relations only holds level-0 relations which is the correct filtering criteria for primary GHOSTDAG for kept in keep_relations.keys().copied() { - let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { + let Some(ghostdag) = self.ghostdag_store.get_data(kept).unwrap_option() else { continue; }; if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains_key(&h)) { @@ -296,7 +294,7 @@ impl 
PruningProcessor { mutable_ghostdag.selected_parent = ORIGIN; } counter += 1; - self.ghostdag_primary_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); + self.ghostdag_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); } } self.db.write(batch).unwrap(); @@ -444,7 +442,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[lower_level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - self.ghostdag_stores[lower_level].delete_batch(&mut batch, current).unwrap_option(); + + if lower_level == 0 { + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); + } } } else { // Count only blocks which get fully pruned including DAG relations @@ -463,9 +464,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option(); }); + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); + // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); self.depth_store.delete_batch(&mut batch, current).unwrap(); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 9af6879c7b..4b571dddc7 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -116,7 +116,7 @@ pub struct VirtualStateProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) daa_excluded_store: Arc, pub(super) block_transactions_store: Arc, @@ -191,7 +191,7 @@ impl VirtualStateProcessor { db, statuses_store: storage.statuses_store.clone(), headers_store: storage.headers_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), block_transactions_store: storage.block_transactions_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), @@ -206,7 +206,7 @@ impl VirtualStateProcessor { pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), - ghostdag_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), reachability_service: services.reachability_service.clone(), relations_service: services.relations_service.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), @@ -303,7 +303,7 @@ impl VirtualStateProcessor { .expect("all possible rule errors are unexpected here"); // Update the pruning processor about the virtual state change - let sink_ghostdag_data = self.ghostdag_primary_store.get_compact_data(new_sink).unwrap(); + let sink_ghostdag_data = self.ghostdag_store.get_compact_data(new_sink).unwrap(); // Empty the channel before sending the new message. 
If pruning processor is busy, this step makes sure // the internal channel does not grow with no need (since we only care about the most recent message) let _consume = self.pruning_receiver.try_iter().count(); @@ -404,7 +404,7 @@ impl VirtualStateProcessor { } let header = self.headers_store.get_header(current).unwrap(); - let mergeset_data = self.ghostdag_primary_store.get_data(current).unwrap(); + let mergeset_data = self.ghostdag_store.get_data(current).unwrap(); let pov_daa_score = header.daa_score; let selected_parent_multiset_hash = self.utxo_multisets_store.get(selected_parent).unwrap(); @@ -569,7 +569,7 @@ impl VirtualStateProcessor { let mut heap = tips .into_iter() - .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_primary_store.get_blue_work(block).unwrap() }) + .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_store.get_blue_work(block).unwrap() }) .collect::>(); // The initial diff point is the previous sink @@ -591,7 +591,7 @@ impl VirtualStateProcessor { // 2. will be removed eventually by the bounded merge check. // Hence as an optimization we prefer removing such blocks in advance to allow valid tips to be considered. let filtering_root = self.depth_store.merge_depth_root(candidate).unwrap(); - let filtering_blue_work = self.ghostdag_primary_store.get_blue_work(filtering_root).unwrap_or_default(); + let filtering_blue_work = self.ghostdag_store.get_blue_work(filtering_root).unwrap_or_default(); return ( candidate, heap.into_sorted_iter().take_while(|s| s.blue_work >= filtering_blue_work).map(|s| s.hash).collect(), @@ -609,7 +609,7 @@ impl VirtualStateProcessor { if self.reachability_service.is_dag_ancestor_of(finality_point, parent) && !self.reachability_service.is_dag_ancestor_of_any(parent, &mut heap.iter().map(|sb| sb.hash)) { - heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_primary_store.get_blue_work(parent).unwrap() }); + heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_store.get_blue_work(parent).unwrap() }); } } drop(prune_guard); @@ -1147,7 +1147,7 @@ impl VirtualStateProcessor { // in depth of 2*finality_depth, and can give false negatives for smaller finality violations. 
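Returning to the sink-update hunk above: draining with `try_iter().count()` before sending keeps the pruning channel at "most recent message only". A self-contained sketch of that keep-only-latest pattern, using `std::sync::mpsc` in place of the multi-consumer channel the node actually shares with the pruning processor:

```rust
use std::sync::mpsc;

fn main() {
    // Simplified: one scope holds both ends; the real code shares a
    // multi-consumer channel with the pruning processor.
    let (sender, receiver) = mpsc::channel::<u64>();

    for sink_blue_score in [10u64, 11, 12] {
        // Non-blocking drain: discard whatever is still queued...
        let _consume = receiver.try_iter().count();
        // ...then enqueue the newest update.
        sender.send(sink_blue_score).unwrap();
    }

    // Only the most recent message survives.
    assert_eq!(receiver.try_iter().collect::<Vec<_>>(), vec![12]);
}
```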
let current_pp = self.pruning_point_store.read().pruning_point().unwrap(); let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp); - let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp); + let vff = self.depth_manager.calc_finality_point(&self.ghostdag_store.get_data(vf).unwrap(), current_pp); let last_known_pp = pp_list.iter().rev().find(|pp| match self.statuses_store.read().get(pp.hash).unwrap_option() { Some(status) => status.is_valid(), diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 6516888188..6daa3deb85 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -82,7 +82,7 @@ impl VirtualStateProcessor { for (i, (merged_block, txs)) in once((ctx.selected_parent(), selected_parent_transactions)) .chain( ctx.ghostdag_data - .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_primary_store.deref()) + .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_store.deref()) .map(|b| (b, self.block_transactions_store.get(b).unwrap())), ) .enumerate() diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 8dfe4e7937..997c4eecb5 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -29,6 +29,7 @@ pub struct GhostdagManager, pub(super) reachability_service: U, + use_score_as_work: bool, } impl GhostdagManager { @@ -39,8 +40,9 @@ impl, reachability_service: U, + use_score_as_work: bool, ) -> Self { - Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store } + Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work } } pub fn genesis_ghostdag_data(&self) -> GhostdagData { @@ -115,14 +117,19 @@ impl = std::result::Result; struct CachedPruningPointData { pruning_point: Hash, @@ -88,6 +100,49 @@ impl Clone for CachedPruningPointData { } } +struct TempProofContext { + headers_store: Arc, + ghostdag_stores: Vec>, + relations_stores: Vec, + reachability_stores: Vec>>, + ghostdag_managers: + Vec, DbHeadersStore>>, + db_lifetime: DbLifetime, +} + +#[derive(Clone)] +struct RelationsStoreInFutureOfRoot { + relations_store: T, + reachability_service: U, + root: Hash, +} + +impl RelationsStoreReader for RelationsStoreInFutureOfRoot { + fn get_parents(&self, hash: Hash) -> Result { + self.relations_store.get_parents(hash).map(|hashes| { + Arc::new(hashes.iter().copied().filter(|h| self.reachability_service.is_dag_ancestor_of(self.root, *h)).collect_vec()) + }) + } + + fn get_children(&self, hash: Hash) -> StoreResult> { + // We assume hash is in future of root + assert!(self.reachability_service.is_dag_ancestor_of(self.root, hash)); + self.relations_store.get_children(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.reachability_service.is_dag_ancestor_of(self.root, hash) { + Ok(false) + } else { + self.relations_store.has(hash) + } + } + + fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> { + unimplemented!() + } +} + pub struct PruningProofManager { db: Arc, @@ -95,8 +150,9 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_stores: Arc>>, + ghostdag_store: Arc, 
relations_stores: Arc>>, + level_relations_services: Vec>, pruning_point_store: Arc>, past_pruning_points_store: Arc, virtual_stores: Arc>, @@ -105,7 +161,7 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -129,7 +185,7 @@ impl PruningProofManager { storage: &Arc, parents_manager: DbParentsManager, reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, max_block_level: BlockLevel, @@ -145,7 +201,7 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_store: storage.ghostdag_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -155,7 +211,6 @@ impl PruningProofManager { selected_chain_store: storage.selected_chain_store.clone(), depth_store: storage.depth_store.clone(), - ghostdag_managers, traversal_manager, window_manager, parents_manager, @@ -168,8 +223,13 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, + ghostdag_manager, is_consensus_exiting, + + level_relations_services: (0..=max_block_level) + .map(|level| MTRelationsService::new(storage.relations_stores.clone().clone(), level)) + .collect_vec(), } } @@ -203,18 +263,30 @@ impl PruningProofManager { let pruning_point_header = proof[0].last().unwrap().clone(); let pruning_point = pruning_point_header.hash; - let proof_zero_set = BlockHashSet::from_iter(proof[0].iter().map(|header| header.hash)); + // Create a copy of the proof, since we're going to be mutating the proof passed to us + let proof_sets = (0..=self.max_block_level) + .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) + .collect_vec(); + let mut trusted_gd_map: BlockHashMap = BlockHashMap::new(); for tb in trusted_set.iter() { trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into()); - if proof_zero_set.contains(&tb.block.hash()) { - continue; - } + let tb_block_level = calc_block_level(&tb.block.header, self.max_block_level); + + (0..=tb_block_level).for_each(|current_proof_level| { + // If this block was in the original proof, ignore it + if proof_sets[current_proof_level as usize].contains(&tb.block.hash()) { + return; + } - proof[0].push(tb.block.header.clone()); + proof[current_proof_level as usize].push(tb.block.header.clone()); + }); } - proof[0].sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + proof.iter_mut().for_each(|level_proof| { + level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); + }); + self.populate_reachability_and_headers(&proof); { @@ -229,47 +301,48 @@ impl PruningProofManager { for (level, headers) in proof.iter().enumerate() { trace!("Applying level {} from the pruning point proof", level); - self.ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + let mut level_ancestors: HashSet = HashSet::new(); + level_ancestors.insert(ORIGIN); + for header in headers.iter() { let parents = Arc::new( self.parents_manager .parents_at_level(header, 
level as BlockLevel) .iter() .copied() - .filter(|parent| self.ghostdag_stores[level].has(*parent).unwrap()) + .filter(|parent| level_ancestors.contains(parent)) .collect_vec() .push_if_empty(ORIGIN), ); self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - let gd = if header.hash == self.genesis_hash { - self.ghostdag_managers[level].genesis_ghostdag_data() - } else if level == 0 { - if let Some(gd) = trusted_gd_map.get(&header.hash) { + + if level == 0 { + let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); + let calculated_gd = self.ghostdag_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, blue_work: header.blue_work, selected_parent: calculated_gd.selected_parent, - mergeset_blues: calculated_gd.mergeset_blues.clone(), - mergeset_reds: calculated_gd.mergeset_reds.clone(), - blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), + mergeset_blues: calculated_gd.mergeset_blues, + mergeset_reds: calculated_gd.mergeset_reds, + blues_anticone_sizes: calculated_gd.blues_anticone_sizes, } - } - } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + }; + self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap(); + } + + level_ancestors.insert(header.hash); } } let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_managers[0].ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -387,18 +460,16 @@ impl PruningProofManager { } } - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { - if proof.len() != self.max_block_level as usize + 1 { - return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); - } + fn init_validate_pruning_point_proof_stores_and_processes( + &self, + proof: &PruningPointProof, + ) -> PruningImportResult { if proof[0].is_empty() { return Err(PruningImportError::PruningProofNotEnoughHeaders); } let headers_estimate = self.estimate_proof_unique_size(proof); - let proof_pp_header = proof[0].last().expect("checked if empty"); - let proof_pp = proof_pp_header.hash; - let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -428,6 +499,7 @@ impl PruningProofManager { relations_stores[level].clone(), headers_store.clone(), reachability_services[level].clone(), + level != 0, ) }) .collect_vec(); @@ -438,12 +510,30 @@ impl PruningProofManager { let level = level as usize; reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); - ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); } db.write(batch).unwrap(); } + Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, 
relations_stores, reachability_stores, ghostdag_managers }) + } + + fn populate_stores_for_validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + ctx: &mut TempProofContext, + log_validating: bool, + ) -> PruningImportResult> { + let headers_store = &ctx.headers_store; + let ghostdag_stores = &ctx.ghostdag_stores; + let mut relations_stores = ctx.relations_stores.clone(); + let reachability_stores = &ctx.reachability_stores; + let ghostdag_managers = &ctx.ghostdag_managers; + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; for level in (0..=self.max_block_level).rev() { // Before processing this level, check if the process is exiting so we can end early @@ -451,7 +541,9 @@ impl PruningProofManager { return Err(PruningImportError::PruningValidationInterrupted); } - info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + if log_validating { + info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + } let level_idx = level as usize; let mut selected_tip = None; for (i, header) in proof[level as usize].iter().enumerate() { @@ -533,49 +625,125 @@ impl PruningProofManager { selected_tip_by_level[level_idx] = selected_tip; } + Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) + } + + fn validate_proof_selected_tip( + &self, + proof_selected_tip: Hash, + level: BlockLevel, + proof_pp_level: BlockLevel, + proof_pp: Hash, + proof_pp_header: &Header, + ) -> PruningImportResult<()> { + // A proof selected tip of some level has to be the proof's suggested pruning point itself if its level + // is lower than or equal to the pruning point level, or a parent of the pruning point on the relevant level + // otherwise. + if level <= proof_pp_level { + if proof_selected_tip != proof_pp { + return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); + } + } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { + return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); + } + + Ok(()) + } + + // find_proof_and_consensus_common_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. If no + // such ancestor exists, it returns None.
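A minimal sketch of the common-ancestor walk described above, with both GHOSTDAG stores reduced to plain hash-to-(blue work, selected parent) maps and all names hypothetical; the real function returns the compact GHOSTDAG data from both stores rather than bare blue work values:

```rust
use std::collections::HashMap;

type Hash = u64;
const ORIGIN: Hash = 0;

/// Stand-in for CompactGhostdagData: (blue_work, selected_parent) per block.
type GhostdagMap = HashMap<Hash, (u64, Hash)>;

/// Walk the proof's selected-parent chain down from `tip` until hitting a
/// block the current consensus store also knows; return both blue works.
fn find_common_ancestor(proof: &GhostdagMap, consensus: &GhostdagMap, tip: Hash) -> Option<(u64, u64)> {
    let mut current = tip;
    let mut current_gd = *proof.get(&current)?;
    loop {
        if let Some(consensus_gd) = consensus.get(&current) {
            return Some((current_gd.0, consensus_gd.0));
        }
        current = current_gd.1; // step to the selected parent
        if current == ORIGIN {
            return None; // chain bottomed out: no shared block at this level
        }
        current_gd = *proof.get(&current)?;
    }
}

fn main() {
    // Proof chain 30 -> 20 -> 10; consensus only knows block 10.
    let proof: GhostdagMap = [(30, (300, 20)), (20, (200, 10)), (10, (100, ORIGIN))].into();
    let consensus: GhostdagMap = [(10, (90, ORIGIN))].into();
    assert_eq!(find_common_ancestor(&proof, &consensus, 30), Some((100, 90)));
}
```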
+ fn find_proof_and_consensus_common_ancestor_ghostdag_data( + &self, + proof_ghostdag_stores: &[Arc], + current_consensus_ghostdag_stores: &[Arc], + proof_selected_tip: Hash, + level: BlockLevel, + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match current_consensus_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = proof_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; + } + } + + pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + if proof.len() != self.max_block_level as usize + 1 { + return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); + } + + // Initialize the stores for the proof + let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let proof_selected_tip_by_level = + self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; + let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; + + // Get the proof for the current consensus and recreate the stores for it + // This is expected to be fast because if a proof exists, it will be cached. + // If no proof exists, this is empty + let mut current_consensus_proof = self.get_pruning_point_proof(); + if current_consensus_proof.is_empty() { + // An empty proof can only happen if we're at genesis. 
We're going to create a proof for this case that contains the genesis header only + let genesis_header = self.headers_store.get_header(self.genesis_hash).unwrap(); + current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); + } + let mut current_consensus_stores_and_processes = + self.init_validate_pruning_point_proof_stores_and_processes(&current_consensus_proof)?; + let _ = self.populate_stores_for_validate_pruning_point_proof( + &current_consensus_proof, + &mut current_consensus_stores_and_processes, + false, + )?; + let current_consensus_ghostdag_stores = current_consensus_stores_and_processes.ghostdag_stores; + let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - for (level_idx, selected_tip) in selected_tip_by_level.into_iter().enumerate() { + for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; - let selected_tip = selected_tip.unwrap(); - if level <= proof_pp_level { - if selected_tip != proof_pp { - return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(selected_tip, level)); - } - } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip) { - return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(selected_tip, level)); - } + self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; - let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + + // Next check is to see if this proof is "better" than what's in the current consensus + // Step 1 - look at only levels that have a full proof (at least 2m blocks in the proof) if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - let mut proof_current = selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - let common_ancestor_data = loop { - match self.ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap(); - } - }; - }; - - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = common_ancestor_data { + // Step 2 - if we can find a common ancestor between the proof and current consensus + // we can determine if the proof is better. The proof is better if the blue work* difference between the + // old current consensus's tips and the common ancestor is less than the blue work difference between the + // proof's tip and the common ancestor.
+ // *Note: blue work is the same as blue score on levels higher than 0 + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &proof_ghostdag_stores, + ¤t_consensus_ghostdag_stores, + selected_tip, + level, + proof_selected_tip_gd, + ) { let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { - let parent_blue_work = self.ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { @@ -593,15 +761,24 @@ impl PruningProofManager { return Ok(()); } + // If we got here it means there's no level with shared blocks + // between the proof and the current consensus. In this case we + // consider the proof to be better if it has at least one level + // with 2*self.pruning_proof_m blue blocks where consensus doesn't. for level in (0..=self.max_block_level).rev() { let level_idx = level as usize; + + let proof_selected_tip = proof_selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + match relations_read[level_idx].get_parents(current_pp).unwrap_option() { Some(parents) => { - if parents - .iter() - .copied() - .any(|parent| self.ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) - { + if parents.iter().copied().any(|parent| { + current_consensus_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m + }) { return Ok(()); } } @@ -614,45 +791,234 @@ impl PruningProofManager { drop(pruning_read); drop(relations_read); - drop(db_lifetime); + drop(proof_stores_and_processes.db_lifetime); + drop(current_consensus_stores_and_processes.db_lifetime); Err(PruningImportError::PruningProofNotEnoughHeaders) } + // The "current dag level" is the level right before the level whose parents are + // not the same as our header's direct parents + // + // Find the current DAG level by going through all the parents at each level, + // starting from the bottom level and see which is the first level that has + // parents that are NOT our current pp_header's direct parents. 
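A small self-contained sketch of that search, modeling `parents_by_level` as a plain vector of parent lists (index 0 holds the direct parents) and hashes as `u64`:

```rust
use std::collections::HashSet;

type Hash = u64;

/// First level whose parent set differs from the direct parents, minus one;
/// falls back to the maximum level when every level agrees.
fn find_current_dag_level(parents_by_level: &[Vec<Hash>], max_block_level: u8) -> u8 {
    let direct: HashSet<Hash> = parents_by_level[0].iter().copied().collect();
    parents_by_level
        .iter()
        .enumerate()
        .skip(1)
        .find_map(|(level, parents)| {
            let level_set: HashSet<Hash> = parents.iter().copied().collect();
            if level_set == direct { None } else { Some((level - 1) as u8) }
        })
        .unwrap_or(max_block_level)
}

fn main() {
    // Levels 0 and 1 share the same parents; level 2 diverges => DAG level 1.
    let parents_by_level = vec![vec![7, 8], vec![8, 7], vec![9]];
    assert_eq!(find_current_dag_level(&parents_by_level, 5), 1);
}
```

The `skip(1)` mirrors the real implementation: level 0 trivially equals the direct parents, so the scan starts one level up.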
+ fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { + let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); + pp_header + .parents_by_level + .iter() + .enumerate() + .skip(1) + .find_map(|(level, parents)| { + if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { + None + } else { + Some((level - 1) as BlockLevel) + } + }) + .unwrap_or(self.max_block_level) + } + + fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { + level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth) + } + + /// selected parent at level = the parent of the header at the level + /// with the highest blue_work + fn find_selected_parent_header_at_level( + &self, + header: &Header, + level: BlockLevel, + ) -> PruningProofManagerInternalResult> { + // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important + // to filter to include only parents that are in relations_service. + let sp = self + .parents_manager + .parents_at_level(header, level) + .iter() + .copied() + .filter(|p| self.level_relations_services[level as usize].has(*p).unwrap()) + .filter_map(|p| self.headers_store.get_header(p).unwrap_option().map(|h| SortableBlock::new(p, h.blue_work))) + .max() + .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof("no parents with header".to_string()))?; + Ok(self.headers_store.get_header(sp.hash).expect("unwrapped above")) + } + + /// Find a sufficient root at a given level by going through the headers store and looking + /// for a deep enough level block + /// For each root candidate, fill in the ghostdag data to see if it actually is deep enough. + /// If the root is deep enough, it will satisfy these conditions + /// 1. block at depth 2m at this level ∈ Future(root) + /// 2. block at depth m at the next level ∈ Future(root) + /// + /// Returns: the filled ghostdag store from root to tip, the selected tip and the root + fn find_sufficient_root( + &self, + pp_header: &HeaderWithBlockLevel, + level: BlockLevel, + current_dag_level: BlockLevel, + required_block: Option, + temp_db: Arc, + ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { + // Step 1: Determine which selected tip to use + let selected_tip = if pp_header.block_level >= level { + pp_header.header.hash + } else { + self.find_selected_parent_header_at_level(&pp_header.header, level)?.hash + }; + + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); + let required_level_depth = 2 * self.pruning_proof_m; + + // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from. 
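To make the "2x deeper every level" estimate concrete, here is the same shift-based arithmetic as a standalone function with a worked example; the 2100 figure assumes a hypothetical `pruning_proof_m = 1000` plus the 100-block safety margin taken below:

```rust
/// Mirrors `estimated_blue_depth_at_level_0`: every level above the current
/// DAG level roughly doubles the level-0 depth that must be scanned.
fn estimated_blue_depth_at_level_0(level: u8, level_depth: u64, current_dag_level: u8) -> u64 {
    level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth)
}

fn main() {
    // Two levels above the current DAG level => shift by 2 => 4x the depth.
    assert_eq!(estimated_blue_depth_at_level_0(3, 2100, 1), 8400);
    // At or below the current DAG level nothing is scaled.
    assert_eq!(estimated_blue_depth_at_level_0(1, 2100, 3), 2100);
}
```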
+ // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level + let mut required_base_level_depth = self.estimated_blue_depth_at_level_0( + level, + required_level_depth + 100, // We take a safety margin + current_dag_level, + ); + + let mut is_last_level_header; + let mut tries = 0; + + let block_at_depth_m_at_next_level = required_block.unwrap_or(selected_tip); + + loop { + // Step 2 - Find a deep enough root candidate + let block_at_depth_2m = match self.level_block_at_base_depth(level, selected_tip, required_base_level_depth) { + Ok((header, is_last_header)) => { + is_last_level_header = is_last_header; + header + } + Err(e) => return Err(e), + }; + + let root = if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) { + block_at_depth_2m + } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { + block_at_depth_m_at_next_level + } else { + // find common ancestor of block_at_depth_m_at_next_level and block_at_depth_2m in chain of block_at_depth_m_at_next_level + let mut common_ancestor = self.headers_store.get_header(block_at_depth_m_at_next_level).unwrap(); + + while !self.reachability_service.is_dag_ancestor_of(common_ancestor.hash, block_at_depth_2m) { + common_ancestor = match self.find_selected_parent_header_at_level(&common_ancestor, level) { + Ok(header) => header, + // Try to give this last header a chance at being root + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => break, + Err(e) => return Err(e), + }; + } + + common_ancestor.hash + }; + + if level == 0 { + return Ok((self.ghostdag_store.clone(), selected_tip, root)); + } + + // Step 3 - Fill the ghostdag data from root to tip + let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); + let has_required_block = self.fill_level_proof_ghostdag_data( + root, + pp_header.header.hash, + &ghostdag_store, + Some(block_at_depth_m_at_next_level), + level, + ); + + // Step 4 - Check if we actually have enough depth. + // Need to ensure this does the same 2M+1 depth that block_at_depth does + if has_required_block + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + { + break Ok((ghostdag_store, selected_tip, root)); + } + + tries += 1; + if is_last_level_header { + if has_required_block { + // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place. + // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to + // try to find 2500 depth worth of headers at a level, but the proof only contains about 2000 headers. To be able to sync + // with such an older node, as long as we found the required block, we can still proceed. + debug!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned. Required block found so trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); + } else { + panic!("Failed to find sufficient root for level {level} after {tries} tries.
Headers below the current depth of {required_base_level_depth} are already pruned"); + } + } + + // If we don't have enough depth now, we need to look deeper + required_base_level_depth = (required_base_level_depth as f64 * 1.1) as u64; + debug!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_base_level_depth}"); + } + } + + fn calc_gd_for_all_levels( + &self, + pp_header: &HeaderWithBlockLevel, + temp_db: Arc, + ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); + let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; + let mut root_by_level = vec![None; self.max_block_level as usize + 1]; + for level in (0..=self.max_block_level).rev() { + let level_usize = level as usize; + let required_block = if level != self.max_block_level { + let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone(); + let block_at_depth_m_at_next_level = self + .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m) + .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) + .unwrap(); + Some(block_at_depth_m_at_next_level) + } else { + None + }; + let (store, selected_tip, root) = self + .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) + .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); + ghostdag_stores[level_usize] = Some(store); + selected_tip_by_level[level_usize] = Some(selected_tip); + root_by_level[level_usize] = Some(root); + } + + ( + ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(), + selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(), + root_by_level.into_iter().map(Option::unwrap).collect_vec(), + ) + } + pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { if pp == self.genesis_hash { return vec![]; } + let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); - let selected_tip_by_level = (0..=self.max_block_level) - .map(|level| { - if level <= pp_header.block_level { - pp - } else { - self.ghostdag_managers[level as usize].find_selected_parent( - self.parents_manager - .parents_at_level(&pp_header.header, level) - .iter() - .filter(|parent| self.ghostdag_stores[level as usize].has(**parent).unwrap()) - .cloned(), - ) - } - }) - .collect_vec(); + let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); (0..=self.max_block_level) .map(|level| { let level = level as usize; let selected_tip = selected_tip_by_level[level]; let block_at_depth_2m = self - .block_at_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap(); - let root = if level != self.max_block_level as usize { + // TODO (relaxed): remove the assertion below + // (New Logic) This is the root we calculated by going through block relations + let root = roots_by_level[level]; + // (Old Logic) This is the root we can calculate given that the GD records are already filled + // The root calc logic below is the original logic before the on-demand higher level GD 
calculation + // We only need old_root to sanity check the new logic + let old_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self - .block_at_depth(&*self.ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) .unwrap(); if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { @@ -661,7 +1027,7 @@ impl PruningProofManager { block_at_depth_2m } else { self.find_common_ancestor_in_chain_of_a( - &*self.ghostdag_stores[level], + &*ghostdag_stores[level], block_at_depth_m_at_next_level, block_at_depth_2m, ) @@ -672,30 +1038,39 @@ impl PruningProofManager { block_at_depth_2m }; + // new root is expected to be always an ancestor of old_root because new root takes a safety margin + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, self.ghostdag_stores[level].get_blue_work(root).unwrap()))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { continue; } - if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) { + // The second condition is always expected to be true (ghostdag store will have the entry) + // because we are traversing the exact diamond (future(root) ⋂ past(tip)) for which we calculated + // GD for (see fill_level_proof_ghostdag_data). 
TODO (relaxed): remove the condition or turn into assertion + if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) + || !ghostdag_stores[level].has(current).is_ok_and(|found| found) + { continue; } headers.push(self.headers_store.get_header(current).unwrap()); for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.ghostdag_stores[level].get_blue_work(child).unwrap()))); + queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); } } + // TODO (relaxed): remove the assertion below // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash)); let chain_2m = self - .chain_up_to_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| { dbg!(level, selected_tip, block_at_depth_2m, root); format!("Assert 2M chain -- level: {}, err: {}", level, err) @@ -706,13 +1081,13 @@ impl PruningProofManager { if !set.contains(&chain_hash) { let next_level_tip = selected_tip_by_level[level + 1]; let next_level_chain_m = - self.chain_up_to_depth(&*self.ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); + self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); let next_level_block_m = next_level_chain_m.last().copied().unwrap(); dbg!(next_level_chain_m.len()); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); dbg!(level, selected_tip, block_at_depth_2m, root); panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); } @@ -723,6 +1098,80 @@ impl PruningProofManager { .collect_vec() } + /// BFS forward iterates from root until selected tip, ignoring blocks in the antipast of selected_tip. 
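The traversal sketched below is the heap discipline this function (and the proof-building loop above) relies on: pop blocks in ascending blue work so a block is only processed after its ancestors, all of which carry smaller blue work. Toy types throughout, with blue work as a bare `u64`:

```rust
use std::cmp::Reverse;
use std::collections::{BinaryHeap, HashMap, HashSet};

type Hash = u64;

/// Forward BFS from `root`, always popping the lowest blue work first so a
/// block is visited only after every ancestor with smaller blue work.
fn forward_topological_order(children: &HashMap<Hash, Vec<Hash>>, blue_work: &HashMap<Hash, u64>, root: Hash) -> Vec<Hash> {
    let mut heap = BinaryHeap::new();
    let mut visited = HashSet::new();
    let mut order = Vec::new();
    heap.push(Reverse((blue_work[&root], root)));
    while let Some(Reverse((_, current))) = heap.pop() {
        if !visited.insert(current) {
            continue; // reachable through several parents; process once
        }
        order.push(current);
        for &child in children.get(&current).into_iter().flatten() {
            heap.push(Reverse((blue_work[&child], child)));
        }
    }
    order
}

fn main() {
    // Diamond 1 -> {2, 3} -> 4 with blue work increasing along the DAG.
    let children = HashMap::from([(1, vec![2, 3]), (2, vec![4]), (3, vec![4])]);
    let blue_work = HashMap::from([(1, 10), (2, 20), (3, 25), (4, 40)]);
    assert_eq!(forward_topological_order(&children, &blue_work, 1), vec![1, 2, 3, 4]);
}
```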
+ /// For each block along the way, insert that hash into the ghostdag_store + /// If we have a required_block to find, this will return true if that block was found along the way + fn fill_level_proof_ghostdag_data( + &self, + root: Hash, + selected_tip: Hash, + ghostdag_store: &Arc, + required_block: Option, + level: BlockLevel, + ) -> bool { + let relations_service = RelationsStoreInFutureOfRoot { + relations_store: self.level_relations_services[level as usize].clone(), + reachability_service: self.reachability_service.clone(), + root, + }; + let gd_manager = GhostdagManager::new( + root, + self.ghostdag_k, + ghostdag_store.clone(), + relations_service.clone(), + self.headers_store.clone(), + self.reachability_service.clone(), + level != 0, + ); + + ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); + + let mut topological_heap: BinaryHeap<_> = Default::default(); + let mut visited = BlockHashSet::new(); + for child in relations_service.get_children(root).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, + })); + } + + let mut has_required_block = required_block.is_some_and(|required_block| root == required_block); + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) { + // We don't care about blocks in the antipast of the selected tip + continue; + } + + if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) { + has_required_block = true; + } + + let current_gd = gd_manager.ghostdag(&relations_service.get_parents(current_hash).unwrap()); + + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); + + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, + })); + } + } + + has_required_block + } + /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. fn chain_up_to_depth( &self, @@ -780,6 +1229,42 @@ impl PruningProofManager { Ok(current) } + /// Finds the block on a given level that is at base_depth deep from it. 
+ /// Also returns if the block was the last one in the level + /// base_depth = the blue score depth at level 0 + fn level_block_at_base_depth( + &self, + level: BlockLevel, + high: Hash, + base_depth: u64, + ) -> PruningProofManagerInternalResult<(Hash, bool)> { + let high_header = self + .headers_store + .get_header(high) + .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {base_depth}, {err}")))?; + let high_header_score = high_header.blue_score; + let mut current_header = high_header; + + let mut is_last_header = false; + + while current_header.blue_score + base_depth >= high_header_score { + if current_header.direct_parents().is_empty() { + break; + } + + current_header = match self.find_selected_parent_header_at_level(¤t_header, level) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + // We want to give this root a shot if all its past is pruned + is_last_header = true; + break; + } + Err(e) => return Err(e), + }; + } + Ok((current_header.hash, is_last_header)) + } + fn find_common_ancestor_in_chain_of_a( &self, ghostdag_store: &impl GhostdagStoreReader, @@ -816,7 +1301,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_stores[0].get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -836,7 +1321,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_managers[0].sort_blocks(anticone); + let mut anticone = self.ghostdag_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -847,14 +1332,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_stores[0].get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_stores[0].get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_store.get_data(hash).unwrap()).into(), }); } } @@ -862,7 +1347,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = self.ghostdag_stores[0].get_data(hash).unwrap(); + let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -894,7 +1379,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_stores[0].get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); diff --git a/database/src/key.rs b/database/src/key.rs 
diff --git a/database/src/key.rs b/database/src/key.rs
index e8aeff0916..83fa8ebb2e 100644
--- a/database/src/key.rs
+++ b/database/src/key.rs
@@ -73,6 +73,8 @@ impl Display for DbKey {
             match prefix {
                 Ghostdag
                 | GhostdagCompact
+                | TempGhostdag
+                | TempGhostdagCompact
                 | RelationsParents
                 | RelationsChildren
                 | Reachability
diff --git a/database/src/registry.rs b/database/src/registry.rs
index 752efb97b3..36a728ebe6 100644
--- a/database/src/registry.rs
+++ b/database/src/registry.rs
@@ -41,6 +41,10 @@ pub enum DatabaseStorePrefixes {
     ReachabilityTreeChildren = 30,
     ReachabilityFutureCoveringSet = 31,
 
+    // ---- Ghostdag Proof
+    TempGhostdag = 40,
+    TempGhostdagCompact = 41,
+
     // ---- Metadata ----
     MultiConsensusMetadata = 124,
     ConsensusEntries = 125,
diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml
index 15a408dad5..3507339f29 100644
--- a/kaspad/Cargo.toml
+++ b/kaspad/Cargo.toml
@@ -46,10 +46,12 @@ clap.workspace = true
 dhat = { workspace = true, optional = true }
 dirs.workspace = true
 futures-util.workspace = true
+itertools.workspace = true
 log.workspace = true
 num_cpus.workspace = true
 rand.workspace = true
 rayon.workspace = true
+rocksdb.workspace = true
 serde.workspace = true
 tempfile.workspace = true
 thiserror.workspace = true
diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs
index 4175206eb5..db9f32c165 100644
--- a/kaspad/src/daemon.rs
+++ b/kaspad/src/daemon.rs
@@ -6,9 +6,12 @@ use kaspa_consensus_core::{
     errors::config::{ConfigError, ConfigResult},
 };
 use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService};
-use kaspa_core::{core::Core, debug, info};
+use kaspa_core::{core::Core, debug, info, trace};
 use kaspa_core::{kaspad_env::version, task::tick::TickService};
-use kaspa_database::prelude::CachePolicy;
+use kaspa_database::{
+    prelude::{CachePolicy, DbWriter, DirectDbWriter},
+    registry::DatabaseStorePrefixes,
+};
 use kaspa_grpc_server::service::GrpcService;
 use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext};
 use kaspa_rpc_service::service::RpcCoreService;
@@ -33,6 +36,7 @@ use kaspa_mining::{
 };
 use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService};
 
+use itertools::Itertools;
use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot};
 use kaspa_utxoindex::{api::UtxoIndexProxy, UtxoIndex};
 use kaspa_wrpc_server::service::{Options as WrpcServerOptions, WebSocketCounters as WrpcServerCounters, WrpcEncoding, WrpcService};
@@ -316,13 +320,106 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm
         && (meta_db.get_pinned(b"multi-consensus-metadata-key").is_ok_and(|r| r.is_some())
             || MultiConsensusManagementStore::new(meta_db.clone()).should_upgrade().unwrap())
     {
-        let msg =
-            "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)";
-        get_user_approval_or_exit(msg, args.yes);
+        let mut mcms = MultiConsensusManagementStore::new(meta_db.clone());
+        let version = mcms.version().unwrap();
+
+        // TODO: Update this entire section to a more robust implementation that allows applying multiple upgrade strategies.
+        // If I'm at version 3 and latest version is 7, I need to be able to upgrade to that version following the intermediate
+        // steps without having to delete the DB
+        if version == 3 {
+            let active_consensus_dir_name = mcms.active_consensus_dir_name().unwrap();
+
+            match active_consensus_dir_name {
+                Some(current_consensus_db) => {
+                    // Apply soft upgrade logic: delete GD data from higher levels
+                    // and then update DB version to 4
+                    let consensus_db = kaspa_database::prelude::ConnBuilder::default()
+                        .with_db_path(consensus_db_dir.clone().join(current_consensus_db))
+                        .with_files_limit(1)
+                        .build()
+                        .unwrap();
+                    info!("Scanning for deprecated records to clean up");
+
+                    let mut gd_record_count: u32 = 0;
+                    let mut compact_record_count: u32 = 0;
+
+                    let start_level: u8 = 1;
+                    let start_level_bytes = start_level.to_le_bytes();
+                    let ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec();
+                    let ghostdag_prefix = ghostdag_prefix_vec.as_slice();
+
+                    // This section is used to count the records to be deleted. It's not used for the actual delete.
+                    for result in consensus_db.iterator(rocksdb::IteratorMode::From(ghostdag_prefix, rocksdb::Direction::Forward)) {
+                        let (key, _) = result.unwrap();
+                        if !key.starts_with(&[DatabaseStorePrefixes::Ghostdag.into()]) {
+                            break;
+                        }
+
+                        gd_record_count += 1;
+                    }
+
+                    let compact_prefix_vec = DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec();
+                    let compact_prefix = compact_prefix_vec.as_slice();
+
+                    for result in consensus_db.iterator(rocksdb::IteratorMode::From(compact_prefix, rocksdb::Direction::Forward)) {
+                        let (key, _) = result.unwrap();
+                        if !key.starts_with(&[DatabaseStorePrefixes::GhostdagCompact.into()]) {
+                            break;
+                        }
+
+                        compact_record_count += 1;
+                    }
+
+                    trace!("Number of Ghostdag records to clean up: {}", gd_record_count);
+                    trace!("Number of GhostdagCompact records to clean up: {}", compact_record_count);
+                    info!("Number of deprecated records to clean up: {}", gd_record_count + compact_record_count);
+
+                    let msg =
+                        "Node database currently at version 3. Upgrade process to version 4 needs to be applied. Continue? (y/n)";
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + // Actual delete only happens after user consents to the upgrade: + let mut writer = DirectDbWriter::new(&consensus_db); + + let end_level: u8 = config.max_block_level + 1; + let end_level_bytes = end_level.to_le_bytes(); - info!("Deleting databases from previous Kaspad version"); + let start_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let end_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(end_level_bytes).collect_vec(); - is_db_reset_needed = true; + let start_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let end_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(end_level_bytes).collect_vec(); + + // Apply delete of range from level 1 to max (+1) for Ghostdag and GhostdagCompact: + writer.delete_range(start_ghostdag_prefix_vec.clone(), end_ghostdag_prefix_vec.clone()).unwrap(); + writer.delete_range(start_compact_prefix_vec.clone(), end_compact_prefix_vec.clone()).unwrap(); + + // Compact the deleted rangeto apply the delete immediately + consensus_db.compact_range(Some(start_ghostdag_prefix_vec.as_slice()), Some(end_ghostdag_prefix_vec.as_slice())); + consensus_db.compact_range(Some(start_compact_prefix_vec.as_slice()), Some(end_compact_prefix_vec.as_slice())); + + // Also update the version to one higher: + mcms.set_version(version + 1).unwrap(); + } + None => { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + is_db_reset_needed = true; + } + } + } else { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? 
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + info!("Deleting databases from previous Kaspad version"); + + is_db_reset_needed = true; + } } // Will be true if any of the other condition above except args.reset_db diff --git a/simpa/src/main.rs b/simpa/src/main.rs index c66656be3c..2994b0a090 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -425,12 +425,12 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) - } fn print_stats(src_consensus: &Consensus, hashes: &[Hash], delay: f64, bps: f64, k: KType) -> usize { - let blues_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_blues.len()).sum::() as f64 - / hashes.len() as f64; - let reds_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_reds.len()).sum::() as f64 - / hashes.len() as f64; + let blues_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_blues.len()).sum::() + as f64 + / hashes.len() as f64; + let reds_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_reds.len()).sum::() + as f64 + / hashes.len() as f64; let parents_mean = hashes.iter().map(|&h| src_consensus.headers_store.get_header(h).unwrap().direct_parents().len()).sum::() as f64 / hashes.len() as f64; From 9e720c25549622bb195ed1e06f9feb699ec63e2a Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Tue, 5 Nov 2024 16:25:34 +0200 Subject: [PATCH 20/31] Standartize fork activation logic (#588) * Use ForkActivation for all fork activations * Avoid using negation in some ifs * Add is_within_range_from_activation * Move 'is always' check inside is_within_range_from_activation * lints --- consensus/core/src/config/params.rs | 93 ++++++++++++------- consensus/src/consensus/mod.rs | 4 +- consensus/src/consensus/services.rs | 4 +- .../body_validation_in_isolation.rs | 2 +- .../src/pipeline/body_processor/processor.rs | 8 +- .../pipeline/virtual_processor/processor.rs | 8 +- .../virtual_processor/utxo_validation.rs | 4 +- .../processes/transaction_validator/mod.rs | 10 +- .../transaction_validator_populated.rs | 4 +- consensus/src/processes/window.rs | 18 ++-- simpa/src/main.rs | 8 +- .../src/consensus_integration_tests.rs | 18 ++-- 12 files changed, 106 insertions(+), 75 deletions(-) diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index f3479b4c2b..9c4a500e52 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -15,6 +15,33 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct ForkActivation(u64); + +impl ForkActivation { + pub const fn new(daa_score: u64) -> Self { + Self(daa_score) + } + + pub const fn never() -> Self { + Self(u64::MAX) + } + + pub const fn always() -> Self { + Self(0) + } + + pub fn is_active(self, current_daa_score: u64) -> bool { + current_daa_score >= self.0 + } + + /// Checks if the fork was "recently" activated, i.e., in the time frame of the provided range. + /// This function returns false for forks that were always active, since they were never activated. + pub fn is_within_range_from_activation(self, current_daa_score: u64, range: u64) -> bool { + self != Self::always() && self.is_active(current_daa_score) && current_daa_score < self.0 + range + } +} + /// Consensus parameters. Contains settings and configurations which are consensus-sensitive. 
 /// Consensus parameters. Contains settings and configurations which are consensus-sensitive.
 /// Changing one of these on a network node would exclude and prevent it from reaching consensus
 /// with the other unmodified nodes.
@@ -41,7 +68,7 @@ pub struct Params {
     pub target_time_per_block: u64,
 
     /// DAA score from which the window sampling starts for difficulty and past median time calculation
-    pub sampling_activation_daa_score: u64,
+    pub sampling_activation: ForkActivation,
 
     /// Defines the highest allowed proof of work difficulty value for a block as a [`Uint256`]
     pub max_difficulty_target: Uint256,
@@ -81,7 +108,7 @@ pub struct Params {
     pub storage_mass_parameter: u64,
 
     /// DAA score from which storage mass calculation and transaction mass field are activated as a consensus rule
-    pub storage_mass_activation_daa_score: u64,
+    pub storage_mass_activation: ForkActivation,
 
     /// DAA score after which the pre-deflationary period switches to the deflationary period
     pub deflationary_phase_daa_score: u64,
@@ -117,10 +144,10 @@ impl Params {
     #[inline]
     #[must_use]
     pub fn past_median_time_window_size(&self, selected_parent_daa_score: u64) -> usize {
-        if selected_parent_daa_score < self.sampling_activation_daa_score {
-            self.legacy_past_median_time_window_size()
-        } else {
+        if self.sampling_activation.is_active(selected_parent_daa_score) {
             self.sampled_past_median_time_window_size()
+        } else {
+            self.legacy_past_median_time_window_size()
         }
     }
 
@@ -129,10 +156,10 @@ impl Params {
     #[inline]
     #[must_use]
     pub fn timestamp_deviation_tolerance(&self, selected_parent_daa_score: u64) -> u64 {
-        if selected_parent_daa_score < self.sampling_activation_daa_score {
-            self.legacy_timestamp_deviation_tolerance
-        } else {
+        if self.sampling_activation.is_active(selected_parent_daa_score) {
             self.new_timestamp_deviation_tolerance
+        } else {
+            self.legacy_timestamp_deviation_tolerance
         }
     }
 
@@ -141,10 +168,10 @@ impl Params {
     #[inline]
     #[must_use]
     pub fn past_median_time_sample_rate(&self, selected_parent_daa_score: u64) -> u64 {
-        if selected_parent_daa_score < self.sampling_activation_daa_score {
-            1
-        } else {
+        if self.sampling_activation.is_active(selected_parent_daa_score) {
             self.past_median_time_sample_rate
+        } else {
+            1
         }
     }
 
@@ -153,10 +180,10 @@ impl Params {
     #[inline]
     #[must_use]
     pub fn difficulty_window_size(&self, selected_parent_daa_score: u64) -> usize {
-        if selected_parent_daa_score < self.sampling_activation_daa_score {
-            self.legacy_difficulty_window_size
-        } else {
+        if self.sampling_activation.is_active(selected_parent_daa_score) {
             self.sampled_difficulty_window_size
+        } else {
+            self.legacy_difficulty_window_size
         }
     }
 
@@ -165,10 +192,10 @@ impl Params {
     #[inline]
     #[must_use]
     pub fn difficulty_sample_rate(&self, selected_parent_daa_score: u64) -> u64 {
-        if selected_parent_daa_score < self.sampling_activation_daa_score {
-            1
-        } else {
+        if self.sampling_activation.is_active(selected_parent_daa_score) {
             self.difficulty_sample_rate
+        } else {
+            1
         }
     }
 
@@ -188,18 +215,18 @@ impl Params {
     }
 
     pub fn daa_window_duration_in_blocks(&self, selected_parent_daa_score: u64) -> u64 {
-        if selected_parent_daa_score < self.sampling_activation_daa_score {
-            self.legacy_difficulty_window_size as u64
-        } else {
+        if self.sampling_activation.is_active(selected_parent_daa_score) {
             self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64
+        } else {
+            self.legacy_difficulty_window_size as u64
         }
     }
 
     fn expected_daa_window_duration_in_milliseconds(&self, selected_parent_daa_score: u64) -> u64 {
-        if selected_parent_daa_score < self.sampling_activation_daa_score {
-            self.target_time_per_block *
self.legacy_difficulty_window_size as u64 - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.target_time_per_block * self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64 + } else { + self.target_time_per_block * self.legacy_difficulty_window_size as u64 } } @@ -322,7 +349,7 @@ pub const MAINNET_PARAMS: Params = Params { past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, target_time_per_block: 1000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), @@ -352,7 +379,7 @@ pub const MAINNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: @@ -385,7 +412,7 @@ pub const TESTNET_PARAMS: Params = Params { past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, target_time_per_block: 1000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), @@ -415,7 +442,7 @@ pub const TESTNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. 
This number is calculated as follows: @@ -447,7 +474,7 @@ pub const TESTNET11_PARAMS: Params = Params { legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation_daa_score: 0, // Sampling is activated from network inception + sampling_activation: ForkActivation::always(), // Sampling is activated from network inception max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, @@ -485,7 +512,7 @@ pub const TESTNET11_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: 0, + storage_mass_activation: ForkActivation::always(), skip_proof_of_work: false, max_block_level: 250, @@ -498,7 +525,7 @@ pub const SIMNET_PARAMS: Params = Params { legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation_daa_score: 0, // Sampling is activated from network inception + sampling_activation: ForkActivation::always(), // Sampling is activated from network inception max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, @@ -538,7 +565,7 @@ pub const SIMNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: 0, + storage_mass_activation: ForkActivation::always(), skip_proof_of_work: true, // For simnet only, PoW can be simulated by default max_block_level: 250, @@ -554,7 +581,7 @@ pub const DEVNET_PARAMS: Params = Params { past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, target_time_per_block: 1000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), @@ -584,7 +611,7 @@ pub const DEVNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. 
This number is calculated as follows: diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 6909562aa1..a47b4218fc 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -257,7 +257,7 @@ impl Consensus { pruning_lock.clone(), notification_root.clone(), counters.clone(), - params.storage_mass_activation_daa_score, + params.storage_mass_activation, )); let virtual_processor = Arc::new(VirtualStateProcessor::new( @@ -753,7 +753,7 @@ impl ConsensusApi for Consensus { } fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash { - let storage_mass_activated = pov_daa_score > self.config.storage_mass_activation_daa_score; + let storage_mass_activated = self.config.storage_mass_activation.is_active(pov_daa_score); calc_hash_merkle_root(txs.iter(), storage_mass_activated) } diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 97c6d0b769..608ae61b49 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -94,7 +94,7 @@ impl ConsensusServices { storage.block_window_cache_for_past_median_time.clone(), params.max_difficulty_target, params.target_time_per_block, - params.sampling_activation_daa_score, + params.sampling_activation, params.legacy_difficulty_window_size, params.sampled_difficulty_window_size, params.min_difficulty_window_len, @@ -146,7 +146,7 @@ impl ConsensusServices { params.coinbase_maturity, tx_script_cache_counters, mass_calculator.clone(), - params.storage_mass_activation_daa_score, + params.storage_mass_activation, ); let pruning_point_manager = PruningPointManager::new( diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index c413552b99..4c6139846b 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -6,7 +6,7 @@ use kaspa_consensus_core::{block::Block, merkle::calc_hash_merkle_root, tx::Tran impl BlockBodyProcessor { pub fn validate_body_in_isolation(self: &Arc, block: &Block) -> BlockProcessResult { - let storage_mass_activated = block.header.daa_score > self.storage_mass_activation_daa_score; + let storage_mass_activated = self.storage_mass_activation.is_active(block.header.daa_score); Self::check_has_transactions(block)?; Self::check_hash_merkle_root(block, storage_mass_activated)?; diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 4191a01cec..6885c78b5e 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -23,7 +23,7 @@ use crossbeam_channel::{Receiver, Sender}; use kaspa_consensus_core::{ block::Block, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - config::genesis::GenesisBlock, + config::{genesis::GenesisBlock, params::ForkActivation}, mass::MassCalculator, tx::Transaction, }; @@ -81,7 +81,7 @@ pub struct BlockBodyProcessor { counters: Arc, /// Storage mass hardfork DAA score - pub(crate) storage_mass_activation_daa_score: u64, + pub(crate) storage_mass_activation: ForkActivation, } impl BlockBodyProcessor { @@ -108,7 +108,7 @@ impl BlockBodyProcessor { pruning_lock: SessionLock, notification_root: Arc, counters: Arc, - storage_mass_activation_daa_score: u64, + storage_mass_activation: ForkActivation, ) -> Self { Self { receiver, @@ 
-131,7 +131,7 @@ impl BlockBodyProcessor { task_manager: BlockTaskDependencyManager::new(), notification_root, counters, - storage_mass_activation_daa_score, + storage_mass_activation, } } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 4b571dddc7..c654fef430 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -52,7 +52,7 @@ use kaspa_consensus_core::{ block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, - config::genesis::GenesisBlock, + config::{genesis::GenesisBlock, params::ForkActivation}, header::Header, merkle::calc_hash_merkle_root, pruning::PruningPointsList, @@ -159,7 +159,7 @@ pub struct VirtualStateProcessor { counters: Arc, // Storage mass hardfork DAA score - pub(crate) storage_mass_activation_daa_score: u64, + pub(crate) storage_mass_activation: ForkActivation, } impl VirtualStateProcessor { @@ -220,7 +220,7 @@ impl VirtualStateProcessor { pruning_lock, notification_root, counters, - storage_mass_activation_daa_score: params.storage_mass_activation_daa_score, + storage_mass_activation: params.storage_mass_activation, } } @@ -983,7 +983,7 @@ impl VirtualStateProcessor { let parents_by_level = self.parents_manager.calc_block_parents(pruning_info.pruning_point, &virtual_state.parents); // Hash according to hardfork activation - let storage_mass_activated = virtual_state.daa_score > self.storage_mass_activation_daa_score; + let storage_mass_activated = self.storage_mass_activation.is_active(virtual_state.daa_score); let hash_merkle_root = calc_hash_merkle_root(txs.iter(), storage_mass_activated); let accepted_id_merkle_root = kaspa_merkle::calc_merkle_root(virtual_state.accepted_tx_ids.iter().copied()); diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 6daa3deb85..4a62a4ae8e 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -14,6 +14,7 @@ use kaspa_consensus_core::{ acceptance_data::{AcceptedTxEntry, MergesetBlockAcceptanceData}, api::args::TransactionValidationArgs, coinbase::*, + config::params::ForkActivation, hashing, header::Header, mass::Kip9Version, @@ -328,7 +329,8 @@ impl VirtualStateProcessor { // For non-activated nets (mainnet, TN10) we can update mempool rules to KIP9 beta asap. 
For // TN11 we need to hard-fork consensus first (since the new beta rules are more permissive) - let kip9_version = if self.storage_mass_activation_daa_score == u64::MAX { Kip9Version::Beta } else { Kip9Version::Alpha }; + let kip9_version = + if self.storage_mass_activation == ForkActivation::never() { Kip9Version::Beta } else { Kip9Version::Alpha }; // Calc the full contextual mass including storage mass let contextual_mass = self diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 008b0c4dd1..f9d9f79c89 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -11,7 +11,7 @@ use kaspa_txscript::{ SigCacheKey, }; -use kaspa_consensus_core::mass::MassCalculator; +use kaspa_consensus_core::{config::params::ForkActivation, mass::MassCalculator}; #[derive(Clone)] pub struct TransactionValidator { @@ -27,7 +27,7 @@ pub struct TransactionValidator { pub(crate) mass_calculator: MassCalculator, /// Storage mass hardfork DAA score - storage_mass_activation_daa_score: u64, + storage_mass_activation: ForkActivation, } impl TransactionValidator { @@ -41,7 +41,7 @@ impl TransactionValidator { coinbase_maturity: u64, counters: Arc, mass_calculator: MassCalculator, - storage_mass_activation_daa_score: u64, + storage_mass_activation: ForkActivation, ) -> Self { Self { max_tx_inputs, @@ -53,7 +53,7 @@ impl TransactionValidator { coinbase_maturity, sig_cache: Cache::with_counters(10_000, counters), mass_calculator, - storage_mass_activation_daa_score, + storage_mass_activation, } } @@ -77,7 +77,7 @@ impl TransactionValidator { coinbase_maturity, sig_cache: Cache::with_counters(10_000, counters), mass_calculator: MassCalculator::new(0, 0, 0, 0), - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), } } } diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index dbf1aa37ea..bbb74f0ae1 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -44,11 +44,11 @@ impl TransactionValidator { let total_in = self.check_transaction_input_amounts(tx)?; let total_out = Self::check_transaction_output_values(tx, total_in)?; let fee = total_in - total_out; - if flags != TxValidationFlags::SkipMassCheck && pov_daa_score > self.storage_mass_activation_daa_score { + if flags != TxValidationFlags::SkipMassCheck && self.storage_mass_activation.is_active(pov_daa_score) { // Storage mass hardfork was activated self.check_mass_commitment(tx)?; - if pov_daa_score < self.storage_mass_activation_daa_score + 10 && self.storage_mass_activation_daa_score > 0 { + if self.storage_mass_activation.is_within_range_from_activation(pov_daa_score, 10) { warn!("--------- Storage mass hardfork was activated successfully!!! 
--------- (DAA score: {})", pov_daa_score); } } diff --git a/consensus/src/processes/window.rs b/consensus/src/processes/window.rs index 9c582af280..ca0f71cf20 100644 --- a/consensus/src/processes/window.rs +++ b/consensus/src/processes/window.rs @@ -9,7 +9,7 @@ use crate::{ }; use kaspa_consensus_core::{ blockhash::BlockHashExtensions, - config::genesis::GenesisBlock, + config::{genesis::GenesisBlock, params::ForkActivation}, errors::{block::RuleError, difficulty::DifficultyResult}, BlockHashSet, BlueWorkType, }; @@ -249,7 +249,7 @@ pub struct SampledWindowManager, block_window_cache_for_past_median_time: Arc, target_time_per_block: u64, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, difficulty_window_size: usize, difficulty_sample_rate: u64, past_median_time_window_size: usize, @@ -269,7 +269,7 @@ impl, max_difficulty_target: Uint256, target_time_per_block: u64, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, difficulty_window_size: usize, min_difficulty_window_len: usize, difficulty_sample_rate: u64, @@ -294,7 +294,7 @@ impl { ghostdag_store: Arc, headers_store: Arc, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, full_window_manager: FullWindowManager, sampled_window_manager: SampledWindowManager, } @@ -541,7 +541,7 @@ impl, max_difficulty_target: Uint256, target_time_per_block: u64, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, full_difficulty_window_size: usize, sampled_difficulty_window_size: usize, min_difficulty_window_len: usize, @@ -571,19 +571,19 @@ impl bool { let sp_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); - sp_daa_score >= self.sampling_activation_daa_score + self.sampling_activation.is_active(sp_daa_score) } } diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 2994b0a090..a2365e1c9f 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -13,7 +13,7 @@ use kaspa_consensus::{ headers::HeaderStoreReader, relations::RelationsStoreReader, }, - params::{Params, Testnet11Bps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, TESTNET11_PARAMS}, + params::{ForkActivation, Params, Testnet11Bps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, TESTNET11_PARAMS}, }; use kaspa_consensus_core::{ api::ConsensusApi, block::Block, blockstatus::BlockStatus, config::bps::calculate_ghostdag_k, errors::block::BlockProcessResult, @@ -189,7 +189,7 @@ fn main_impl(mut args: Args) { } args.bps = if args.testnet11 { Testnet11Bps::bps() as f64 } else { args.bps }; let mut params = if args.testnet11 { TESTNET11_PARAMS } else { DEVNET_PARAMS }; - params.storage_mass_activation_daa_score = 400; + params.storage_mass_activation = ForkActivation::new(400); params.storage_mass_parameter = 10_000; let mut builder = ConfigBuilder::new(params) .apply_args(|config| apply_args_to_consensus_params(&args, &mut config.params)) @@ -306,12 +306,12 @@ fn apply_args_to_consensus_params(args: &Args, params: &mut Params) { if args.daa_legacy { // Scale DAA and median-time windows linearly with BPS - params.sampling_activation_daa_score = u64::MAX; + params.sampling_activation = ForkActivation::never(); params.legacy_timestamp_deviation_tolerance = (params.legacy_timestamp_deviation_tolerance as f64 * args.bps) as u64; params.legacy_difficulty_window_size = (params.legacy_difficulty_window_size as f64 * args.bps) as usize; } else { // Use the new sampling algorithms - params.sampling_activation_daa_score = 0; + params.sampling_activation = ForkActivation::always(); 
params.past_median_time_sample_rate = (10.0 * args.bps) as u64; params.new_timestamp_deviation_tolerance = (600.0 * args.bps) as u64; params.difficulty_sample_rate = (2.0 * args.bps) as u64; diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index e66baaf691..2256cd3b34 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -16,7 +16,9 @@ use kaspa_consensus::model::stores::headers::HeaderStoreReader; use kaspa_consensus::model::stores::reachability::DbReachabilityStore; use kaspa_consensus::model::stores::relations::DbRelationsStore; use kaspa_consensus::model::stores::selected_chain::SelectedChainStoreReader; -use kaspa_consensus::params::{Params, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64}; +use kaspa_consensus::params::{ + ForkActivation, Params, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64, +}; use kaspa_consensus::pipeline::monitor::ConsensusMonitor; use kaspa_consensus::pipeline::ProcessingCounters; use kaspa_consensus::processes::reachability::tests::{DagBlock, DagBuilder, StoreValidationExtensions}; @@ -553,7 +555,7 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation_daa_score = u64::MAX; + p.sampling_activation = ForkActivation::never(); }) .build(), }, @@ -562,7 +564,7 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation_daa_score = 0; + p.sampling_activation = ForkActivation::always(); p.new_timestamp_deviation_tolerance = 120; p.past_median_time_sample_rate = 3; p.past_median_time_sampled_window_size = (2 * 120 - 1) / 3; @@ -807,7 +809,7 @@ impl KaspadGoParams { past_median_time_sample_rate: 1, past_median_time_sampled_window_size: 2 * self.TimestampDeviationTolerance - 1, target_time_per_block: self.TargetTimePerBlock / 1_000_000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_block_parents: self.MaxBlockParents, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, @@ -830,7 +832,7 @@ impl KaspadGoParams { mass_per_sig_op: self.MassPerSigOp, max_block_mass: self.MaxBlockMass, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), deflationary_phase_daa_score: self.DeflationaryPhaseDaaScore, pre_deflationary_phase_base_subsidy: self.PreDeflationaryPhaseBaseSubsidy, coinbase_maturity: MAINNET_PARAMS.coinbase_maturity, @@ -1388,7 +1390,7 @@ async fn difficulty_test() { .edit_consensus_params(|p| { p.ghostdag_k = 1; p.legacy_difficulty_window_size = FULL_WINDOW_SIZE; - p.sampling_activation_daa_score = u64::MAX; + p.sampling_activation = ForkActivation::never(); // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window p.legacy_timestamp_deviation_tolerance = 60; @@ -1404,7 +1406,7 @@ async fn difficulty_test() { p.ghostdag_k = 1; p.sampled_difficulty_window_size = SAMPLED_WINDOW_SIZE; p.difficulty_sample_rate = SAMPLE_RATE; - p.sampling_activation_daa_score = 0; + p.sampling_activation = ForkActivation::always(); // Define past median time so that calls to 
add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window p.past_median_time_sample_rate = PMT_SAMPLE_RATE; @@ -1423,7 +1425,7 @@ async fn difficulty_test() { p.target_time_per_block /= HIGH_BPS; p.sampled_difficulty_window_size = HIGH_BPS_SAMPLED_WINDOW_SIZE; p.difficulty_sample_rate = SAMPLE_RATE * HIGH_BPS; - p.sampling_activation_daa_score = 0; + p.sampling_activation = ForkActivation::always(); // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window p.past_median_time_sample_rate = PMT_SAMPLE_RATE * HIGH_BPS; From e925f12b80ba26bd1e40069817335b18d34115c9 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 6 Nov 2024 01:36:34 -0700 Subject: [PATCH 21/31] Refactoring for cleaner pruning proof module (#589) * Cleanup manual block level calc There were two areas in pruning proof mod that manually calculated block level. This replaces those with a call to calc_block_level * Refactor pruning proof build functions * Refactor apply pruning proof functions * Refactor validate pruning functions * Add comments for clarity --- .../src/processes/pruning_proof/apply.rs | 236 ++++ .../src/processes/pruning_proof/build.rs | 532 ++++++++ consensus/src/processes/pruning_proof/mod.rs | 1101 +---------------- .../src/processes/pruning_proof/validate.rs | 376 ++++++ 4 files changed, 1169 insertions(+), 1076 deletions(-) create mode 100644 consensus/src/processes/pruning_proof/apply.rs create mode 100644 consensus/src/processes/pruning_proof/build.rs create mode 100644 consensus/src/processes/pruning_proof/validate.rs diff --git a/consensus/src/processes/pruning_proof/apply.rs b/consensus/src/processes/pruning_proof/apply.rs new file mode 100644 index 0000000000..8463d64647 --- /dev/null +++ b/consensus/src/processes/pruning_proof/apply.rs @@ -0,0 +1,236 @@ +use std::{ + cmp::Reverse, + collections::{hash_map::Entry::Vacant, BinaryHeap, HashSet}, + sync::Arc, +}; + +use itertools::Itertools; +use kaspa_consensus_core::{ + blockhash::{BlockHashes, ORIGIN}, + errors::pruning::{PruningImportError, PruningImportResult}, + header::Header, + pruning::PruningPointProof, + trusted::TrustedBlock, + BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, +}; +use kaspa_core::{debug, trace}; +use kaspa_hashes::Hash; +use kaspa_pow::calc_block_level; +use kaspa_utils::{binary_heap::BinaryHeapExtensions, vec::VecExtensions}; +use rocksdb::WriteBatch; + +use crate::{ + model::{ + services::reachability::ReachabilityService, + stores::{ + ghostdag::{GhostdagData, GhostdagStore}, + headers::HeaderStore, + reachability::StagingReachabilityStore, + relations::StagingRelationsStore, + selected_chain::SelectedChainStore, + virtual_state::{VirtualState, VirtualStateStore}, + }, + }, + processes::{ + ghostdag::{mergeset::unordered_mergeset_without_selected_parent, ordering::SortableBlock}, + reachability::inquirer as reachability, + relations::RelationsStoreExtensions, + }, +}; + +use super::PruningProofManager; + +impl PruningProofManager { + pub fn apply_proof(&self, mut proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { + let pruning_point_header = proof[0].last().unwrap().clone(); + let pruning_point = pruning_point_header.hash; + + // Create a copy of the proof, since we're going to be mutating the proof passed to us + let proof_sets = 
(0..=self.max_block_level)
+            .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash)))
+            .collect_vec();
+
+        let mut trusted_gd_map: BlockHashMap<GhostdagData> = BlockHashMap::new();
+        for tb in trusted_set.iter() {
+            trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into());
+            let tb_block_level = calc_block_level(&tb.block.header, self.max_block_level);
+
+            (0..=tb_block_level).for_each(|current_proof_level| {
+                // If this block was in the original proof, ignore it
+                if proof_sets[current_proof_level as usize].contains(&tb.block.hash()) {
+                    return;
+                }
+
+                proof[current_proof_level as usize].push(tb.block.header.clone());
+            });
+        }
+
+        proof.iter_mut().for_each(|level_proof| {
+            level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work));
+        });
+
+        self.populate_reachability_and_headers(&proof);
+
+        {
+            let reachability_read = self.reachability_store.read();
+            for tb in trusted_set.iter() {
+                // Header-only trusted blocks are expected to be in pruning point past
+                if tb.block.is_header_only() && !reachability_read.is_dag_ancestor_of(tb.block.hash(), pruning_point) {
+                    return Err(PruningImportError::PruningPointPastMissingReachability(tb.block.hash()));
+                }
+            }
+        }
+
+        for (level, headers) in proof.iter().enumerate() {
+            trace!("Applying level {} from the pruning point proof", level);
+            let mut level_ancestors: HashSet<Hash> = HashSet::new();
+            level_ancestors.insert(ORIGIN);
+
+            for header in headers.iter() {
+                let parents = Arc::new(
+                    self.parents_manager
+                        .parents_at_level(header, level as BlockLevel)
+                        .iter()
+                        .copied()
+                        .filter(|parent| level_ancestors.contains(parent))
+                        .collect_vec()
+                        .push_if_empty(ORIGIN),
+                );
+
+                self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap();
+
+                if level == 0 {
+                    let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) {
+                        gd.clone()
+                    } else {
+                        let calculated_gd = self.ghostdag_manager.ghostdag(&parents);
+                        // Override the ghostdag data with the real blue score and blue work
+                        GhostdagData {
+                            blue_score: header.blue_score,
+                            blue_work: header.blue_work,
+                            selected_parent: calculated_gd.selected_parent,
+                            mergeset_blues: calculated_gd.mergeset_blues,
+                            mergeset_reds: calculated_gd.mergeset_reds,
+                            blues_anticone_sizes: calculated_gd.blues_anticone_sizes,
+                        }
+                    };
+                    self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap();
+                }
+
+                level_ancestors.insert(header.hash);
+            }
+        }
+
+        let virtual_parents = vec![pruning_point];
+        let virtual_state = Arc::new(VirtualState {
+            parents: virtual_parents.clone(),
+            ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents),
+            ..VirtualState::default()
+        });
+        self.virtual_stores.write().state.set(virtual_state).unwrap();
+
+        let mut batch = WriteBatch::default();
+        self.body_tips_store.write().init_batch(&mut batch, &virtual_parents).unwrap();
+        self.headers_selected_tip_store
+            .write()
+            .set_batch(&mut batch, SortableBlock { hash: pruning_point, blue_work: pruning_point_header.blue_work })
+            .unwrap();
+        self.selected_chain_store.write().init_with_pruning_point(&mut batch, pruning_point).unwrap();
+        self.depth_store.insert_batch(&mut batch, pruning_point, ORIGIN, ORIGIN).unwrap();
+        self.db.write(batch).unwrap();
+
+        Ok(())
+    }
+
+    pub fn populate_reachability_and_headers(&self, proof: &PruningPointProof) {
+        let capacity_estimate = self.estimate_proof_unique_size(proof);
+        let mut dag = BlockHashMap::with_capacity(capacity_estimate);
+        let mut up_heap = BinaryHeap::with_capacity(capacity_estimate);
+        for header in proof.iter().flatten().cloned() {
+            if let Vacant(e) = dag.entry(header.hash) {
+                // TODO: Check if pow passes
+                let block_level = calc_block_level(&header, self.max_block_level);
+                self.headers_store.insert(header.hash, header.clone(), block_level).unwrap();
+
+                let mut parents = BlockHashSet::with_capacity(header.direct_parents().len() * 2);
+                // We collect all available parent relations in order to maximize reachability information.
+                // By taking into account parents from all levels we ensure that the induced DAG has valid
+                // reachability information for each level-specific sub-DAG -- hence a single reachability
+                // oracle can serve them all
+                for level in 0..=self.max_block_level {
+                    for parent in self.parents_manager.parents_at_level(&header, level) {
+                        parents.insert(*parent);
+                    }
+                }
+
+                struct DagEntry {
+                    header: Arc<Header>,
+                    parents: Arc<BlockHashSet>,
+                }
+
+                up_heap.push(Reverse(SortableBlock { hash: header.hash, blue_work: header.blue_work }));
+                e.insert(DagEntry { header, parents: Arc::new(parents) });
+            }
+        }
+
+        debug!("Estimated proof size: {}, actual size: {}", capacity_estimate, dag.len());
+
+        for reverse_sortable_block in up_heap.into_sorted_iter() {
+            // TODO: Convert to into_iter_sorted once it gets stable
+            let hash = reverse_sortable_block.0.hash;
+            let dag_entry = dag.get(&hash).unwrap();
+
+            // Filter only existing parents
+            let parents_in_dag = BinaryHeap::from_iter(
+                dag_entry
+                    .parents
+                    .iter()
+                    .cloned()
+                    .filter(|parent| dag.contains_key(parent))
+                    .map(|parent| SortableBlock { hash: parent, blue_work: dag.get(&parent).unwrap().header.blue_work }),
+            );
+
+            let reachability_read = self.reachability_store.upgradable_read();
+
+            // Find the maximal parent antichain from the possibly redundant set of existing parents
+            let mut reachability_parents: Vec<SortableBlock> = Vec::new();
+            for parent in parents_in_dag.into_sorted_iter() {
+                if reachability_read.is_dag_ancestor_of_any(parent.hash, &mut reachability_parents.iter().map(|parent| parent.hash)) {
+                    continue;
+                }
+
+                reachability_parents.push(parent);
+            }
+            let reachability_parents_hashes =
+                BlockHashes::new(reachability_parents.iter().map(|parent| parent.hash).collect_vec().push_if_empty(ORIGIN));
+            let selected_parent = reachability_parents.iter().max().map(|parent| parent.hash).unwrap_or(ORIGIN);
+
+            // Prepare batch
+            let mut batch = WriteBatch::default();
+            let mut reachability_relations_write = self.reachability_relations_store.write();
+            let mut staging_reachability = StagingReachabilityStore::new(reachability_read);
+            let mut staging_reachability_relations = StagingRelationsStore::new(&mut reachability_relations_write);
+
+            // Stage
+            staging_reachability_relations.insert(hash, reachability_parents_hashes.clone()).unwrap();
+            let mergeset = unordered_mergeset_without_selected_parent(
+                &staging_reachability_relations,
+                &staging_reachability,
+                selected_parent,
+                &reachability_parents_hashes,
+            );
+            reachability::add_block(&mut staging_reachability, hash, selected_parent, &mut mergeset.iter().copied()).unwrap();
+
+            // Commit
+            let reachability_write = staging_reachability.commit(&mut batch).unwrap();
+            staging_reachability_relations.commit(&mut batch).unwrap();
+
+            // Write
+            self.db.write(batch).unwrap();
+
+            // Drop
+            drop(reachability_write);
+            drop(reachability_relations_write);
+        }
+    }
+}
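Both the apply path above and the build path below lean on the same ordering trick: pushing `Reverse(SortableBlock)` into a `BinaryHeap` turns Rust's max-heap into a min-heap over blue work, and since blue work increases monotonically along parent-child edges (the property the in-code comments rely on), popping in ascending order visits parents before children. A minimal sketch of the ordering (a hypothetical simplified block type, not the real `SortableBlock`):

    use std::{cmp::Reverse, collections::BinaryHeap};

    // Stand-in for SortableBlock: derived Ord compares blue_work first.
    #[derive(PartialEq, Eq, PartialOrd, Ord)]
    struct Block {
        blue_work: u64,
        id: u32,
    }

    fn main() {
        let mut heap = BinaryHeap::new();
        for (blue_work, id) in [(30, 1), (10, 2), (20, 3)] {
            heap.push(Reverse(Block { blue_work, id }));
        }
        // Pops blue_work 10, 20, 30: lower (ancestor-side) blocks first.
        while let Some(Reverse(b)) = heap.pop() {
            println!("block {} (blue_work {})", b.id, b.blue_work);
        }
    }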
diff --git a/consensus/src/processes/pruning_proof/build.rs b/consensus/src/processes/pruning_proof/build.rs
new file mode 100644
index 0000000000..8ae6fb34ca
--- /dev/null
+++ b/consensus/src/processes/pruning_proof/build.rs
@@ -0,0 +1,532 @@
+use std::{cmp::Reverse, collections::BinaryHeap, sync::Arc};
+
+use itertools::Itertools;
+use kaspa_consensus_core::{
+    blockhash::{BlockHashExtensions, BlockHashes, ORIGIN},
+    header::Header,
+    pruning::PruningPointProof,
+    BlockHashSet, BlockLevel, HashMapCustomHasher,
+};
+use kaspa_core::debug;
+use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions, DB};
+use kaspa_hashes::Hash;
+
+use crate::{
+    model::{
+        services::reachability::ReachabilityService,
+        stores::{
+            ghostdag::{DbGhostdagStore, GhostdagStore, GhostdagStoreReader},
+            headers::{HeaderStoreReader, HeaderWithBlockLevel},
+            relations::RelationsStoreReader,
+        },
+    },
+    processes::{
+        ghostdag::{ordering::SortableBlock, protocol::GhostdagManager},
+        pruning_proof::PruningProofManagerInternalError,
+    },
+};
+
+use super::{PruningProofManager, PruningProofManagerInternalResult};
+
+#[derive(Clone)]
+struct RelationsStoreInFutureOfRoot<T: RelationsStoreReader, U: ReachabilityService> {
+    relations_store: T,
+    reachability_service: U,
+    root: Hash,
+}
+
+impl<T: RelationsStoreReader, U: ReachabilityService> RelationsStoreReader for RelationsStoreInFutureOfRoot<T, U> {
+    fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
+        self.relations_store.get_parents(hash).map(|hashes| {
+            Arc::new(hashes.iter().copied().filter(|h| self.reachability_service.is_dag_ancestor_of(self.root, *h)).collect_vec())
+        })
+    }
+
+    fn get_children(&self, hash: Hash) -> StoreResult<kaspa_database::prelude::ReadLock<BlockHashSet>> {
+        // We assume hash is in future of root
+        assert!(self.reachability_service.is_dag_ancestor_of(self.root, hash));
+        self.relations_store.get_children(hash)
+    }
+
+    fn has(&self, hash: Hash) -> Result<bool, StoreError> {
+        if self.reachability_service.is_dag_ancestor_of(self.root, hash) {
+            Ok(false)
+        } else {
+            self.relations_store.has(hash)
+        }
+    }
+
+    fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> {
+        unimplemented!()
+    }
+}
+
+impl PruningProofManager {
+    pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof {
+        if pp == self.genesis_hash {
+            return vec![];
+        }
+
+        let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10));
+        let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap();
+        let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db);
+
+        (0..=self.max_block_level)
+            .map(|level| {
+                let level = level as usize;
+                let selected_tip = selected_tip_by_level[level];
+                let block_at_depth_2m = self
+                    .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m)
+                    .map_err(|err| format!("level: {}, err: {}", level, err))
+                    .unwrap();
+
+                // TODO (relaxed): remove the assertion below
+                // (New Logic) This is the root we calculated by going through block relations
+                let root = roots_by_level[level];
+                // (Old Logic) This is the root we can calculate given that the GD records are already filled
+                // The root calc logic below is the original logic before the on-demand higher level GD calculation
+                // We only need old_root to sanity check the new logic
+                let old_root = if level != self.max_block_level as usize {
+                    let block_at_depth_m_at_next_level = self
+                        .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m)
+                        .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err))
+                        .unwrap();
+                    if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) {
+                        block_at_depth_m_at_next_level
+                    } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) {
+                        block_at_depth_2m
+                    } else {
+                        self.find_common_ancestor_in_chain_of_a(
+                            &*ghostdag_stores[level],
+                            block_at_depth_m_at_next_level,
+                            block_at_depth_2m,
+                        )
+                        .map_err(|err| format!("level: {}, err: {}", level, err))
+                        .unwrap()
+                    }
+                } else {
+                    block_at_depth_2m
+                };
+
+                // new root is expected to be always an ancestor of old_root because new root takes a safety margin
+                assert!(self.reachability_service.is_dag_ancestor_of(root, old_root));
+
+                let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize);
+                let mut queue = BinaryHeap::<Reverse<SortableBlock>>::new();
+                let mut visited = BlockHashSet::new();
+                queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work)));
+                while let Some(current) = queue.pop() {
+                    let current = current.0.hash;
+                    if !visited.insert(current) {
+                        continue;
+                    }
+
+                    // The second condition is always expected to be true (ghostdag store will have the entry)
+                    // because we are traversing the exact diamond (future(root) ⋂ past(tip)) for which we calculated
+                    // GD for (see fill_level_proof_ghostdag_data). TODO (relaxed): remove the condition or turn into assertion
+                    if !self.reachability_service.is_dag_ancestor_of(current, selected_tip)
+                        || !ghostdag_stores[level].has(current).is_ok_and(|found| found)
+                    {
+                        continue;
+                    }
+
+                    headers.push(self.headers_store.get_header(current).unwrap());
+                    for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() {
+                        queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work)));
+                    }
+                }
+
+                // TODO (relaxed): remove the assertion below
+                // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof
+                let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash));
+                let chain_2m = self
+                    .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m)
+                    .map_err(|err| {
+                        dbg!(level, selected_tip, block_at_depth_2m, root);
+                        format!("Assert 2M chain -- level: {}, err: {}", level, err)
+                    })
+                    .unwrap();
+                let chain_2m_len = chain_2m.len();
+                for (i, chain_hash) in chain_2m.into_iter().enumerate() {
+                    if !set.contains(&chain_hash) {
+                        let next_level_tip = selected_tip_by_level[level + 1];
+                        let next_level_chain_m =
+                            self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap();
+                        let next_level_block_m = next_level_chain_m.last().copied().unwrap();
+                        dbg!(next_level_chain_m.len());
+                        dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score);
+                        dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score);
+                        dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score);
+                        dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score);
+                        dbg!(level, selected_tip, block_at_depth_2m, root);
+                        panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len);
+                    }
+                }
+
+                headers
+            })
+            .collect_vec()
+    }
+
+    fn calc_gd_for_all_levels(
+        &self,
+        pp_header: &HeaderWithBlockLevel,
+        temp_db: Arc<DB>,
+    ) -> (Vec<Arc<DbGhostdagStore>>, Vec<Hash>, Vec<Hash>) {
+        let current_dag_level = self.find_current_dag_level(&pp_header.header);
+        let mut ghostdag_stores: Vec<Option<Arc<DbGhostdagStore>>> = vec![None; self.max_block_level as usize + 1];
+        let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1];
+        let mut root_by_level = vec![None; self.max_block_level as usize + 1];
+        for level in (0..=self.max_block_level).rev() {
+            let level_usize = level as usize;
+            let required_block = if level != self.max_block_level {
+                let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone();
+                let block_at_depth_m_at_next_level = self
+                    .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m)
+                    .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err))
+                    .unwrap();
+                Some(block_at_depth_m_at_next_level)
+            } else {
+                None
+            };
+            let (store, selected_tip, root) = self
+                .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone())
+                .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}"));
+            ghostdag_stores[level_usize] = Some(store);
+            selected_tip_by_level[level_usize] = Some(selected_tip);
+            root_by_level[level_usize] = Some(root);
+        }
+
+        (
+            ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(),
+            selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(),
+            root_by_level.into_iter().map(Option::unwrap).collect_vec(),
+        )
+    }
+
+    /// Find a sufficient root at a given level by going through the headers store and looking
+    /// for a deep enough level block
+    /// For each root candidate, fill in the ghostdag data to see if it actually is deep enough.
+    /// If the root is deep enough, it will satisfy these conditions
+    /// 1. block at depth 2m at this level ∈ Future(root)
+    /// 2. block at depth m at the next level ∈ Future(root)
+    ///
+    /// Returns: the filled ghostdag store from root to tip, the selected tip and the root
+    fn find_sufficient_root(
+        &self,
+        pp_header: &HeaderWithBlockLevel,
+        level: BlockLevel,
+        current_dag_level: BlockLevel,
+        required_block: Option<Hash>,
+        temp_db: Arc<DB>,
+    ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> {
+        // Step 1: Determine which selected tip to use
+        let selected_tip = if pp_header.block_level >= level {
+            pp_header.header.hash
+        } else {
+            self.find_selected_parent_header_at_level(&pp_header.header, level)?.hash
+        };
+
+        let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize);
+        let required_level_depth = 2 * self.pruning_proof_m;
+
+        // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from.
+        // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level
+        let mut required_base_level_depth = self.estimated_blue_depth_at_level_0(
+            level,
+            required_level_depth + 100, // We take a safety margin
+            current_dag_level,
+        );
+
+        let mut is_last_level_header;
+        let mut tries = 0;
+
+        let block_at_depth_m_at_next_level = required_block.unwrap_or(selected_tip);
+
+        loop {
+            // Step 2 - Find a deep enough root candidate
+            let block_at_depth_2m = match self.level_block_at_base_depth(level, selected_tip, required_base_level_depth) {
+                Ok((header, is_last_header)) => {
+                    is_last_level_header = is_last_header;
+                    header
+                }
+                Err(e) => return Err(e),
+            };
+
+            let root = if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) {
+                block_at_depth_2m
+            } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) {
+                block_at_depth_m_at_next_level
+            } else {
+                // find common ancestor of block_at_depth_m_at_next_level and block_at_depth_2m in chain of block_at_depth_m_at_next_level
+                let mut common_ancestor = self.headers_store.get_header(block_at_depth_m_at_next_level).unwrap();
+
+                while !self.reachability_service.is_dag_ancestor_of(common_ancestor.hash, block_at_depth_2m) {
+                    common_ancestor = match self.find_selected_parent_header_at_level(&common_ancestor, level) {
+                        Ok(header) => header,
+                        // Try to give this last header a chance at being root
+                        Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => break,
+                        Err(e) => return Err(e),
+                    };
+                }
+
+                common_ancestor.hash
+            };
+
+            if level == 0 {
+                return Ok((self.ghostdag_store.clone(), selected_tip, root));
+            }
+
+            // Step 3 - Fill the ghostdag data from root to tip
+            let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries));
+            let has_required_block = self.fill_level_proof_ghostdag_data(
+                root,
+                pp_header.header.hash,
+                &ghostdag_store,
+ Some(block_at_depth_m_at_next_level), + level, + ); + + // Step 4 - Check if we actually have enough depth. + // Need to ensure this does the same 2M+1 depth that block_at_depth does + if has_required_block + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + { + break Ok((ghostdag_store, selected_tip, root)); + } + + tries += 1; + if is_last_level_header { + if has_required_block { + // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place. + // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to + // try to find 2500 depth worth of headers at a level, but the proof only contains about 2000 headers. To be able to sync + // with such an older node. As long as we found the required block, we can still proceed. + debug!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned. Required block found so trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); + } else { + panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned"); + } + } + + // If we don't have enough depth now, we need to look deeper + required_base_level_depth = (required_base_level_depth as f64 * 1.1) as u64; + debug!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_base_level_depth}"); + } + } + + /// BFS forward iterates from root until selected tip, ignoring blocks in the antipast of selected_tip. + /// For each block along the way, insert that hash into the ghostdag_store + /// If we have a required_block to find, this will return true if that block was found along the way + fn fill_level_proof_ghostdag_data( + &self, + root: Hash, + selected_tip: Hash, + ghostdag_store: &Arc, + required_block: Option, + level: BlockLevel, + ) -> bool { + let relations_service = RelationsStoreInFutureOfRoot { + relations_store: self.level_relations_services[level as usize].clone(), + reachability_service: self.reachability_service.clone(), + root, + }; + let gd_manager = GhostdagManager::new( + root, + self.ghostdag_k, + ghostdag_store.clone(), + relations_service.clone(), + self.headers_store.clone(), + self.reachability_service.clone(), + level != 0, + ); + + ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); + + let mut topological_heap: BinaryHeap<_> = Default::default(); + let mut visited = BlockHashSet::new(); + for child in relations_service.get_children(root).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, + })); + } + + let mut has_required_block = required_block.is_some_and(|required_block| root == required_block); + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) { + // We don't care about blocks in the antipast of the selected tip + 
continue; + } + + if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) { + has_required_block = true; + } + + let current_gd = gd_manager.ghostdag(&relations_service.get_parents(current_hash).unwrap()); + + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); + + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, + })); + } + } + + has_required_block + } + + // The "current dag level" is the level right before the level whose parents are + // not the same as our header's direct parents + // + // Find the current DAG level by going through all the parents at each level, + // starting from the bottom level and see which is the first level that has + // parents that are NOT our current pp_header's direct parents. + fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { + let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); + pp_header + .parents_by_level + .iter() + .enumerate() + .skip(1) + .find_map(|(level, parents)| { + if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { + None + } else { + Some((level - 1) as BlockLevel) + } + }) + .unwrap_or(self.max_block_level) + } + + fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { + level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth) + } + + /// selected parent at level = the parent of the header at the level + /// with the highest blue_work + fn find_selected_parent_header_at_level( + &self, + header: &Header, + level: BlockLevel, + ) -> PruningProofManagerInternalResult> { + // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important + // to filter to include only parents that are in relations_service. + let sp = self + .parents_manager + .parents_at_level(header, level) + .iter() + .copied() + .filter(|p| self.level_relations_services[level as usize].has(*p).unwrap()) + .filter_map(|p| self.headers_store.get_header(p).unwrap_option().map(|h| SortableBlock::new(p, h.blue_work))) + .max() + .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof("no parents with header".to_string()))?; + Ok(self.headers_store.get_header(sp.hash).expect("unwrapped above")) + } + + /// Finds the block on a given level that is at base_depth deep from it. 
+ /// Also returns if the block was the last one in the level + /// base_depth = the blue score depth at level 0 + fn level_block_at_base_depth( + &self, + level: BlockLevel, + high: Hash, + base_depth: u64, + ) -> PruningProofManagerInternalResult<(Hash, bool)> { + let high_header = self + .headers_store + .get_header(high) + .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {base_depth}, {err}")))?; + let high_header_score = high_header.blue_score; + let mut current_header = high_header; + + let mut is_last_header = false; + + while current_header.blue_score + base_depth >= high_header_score { + if current_header.direct_parents().is_empty() { + break; + } + + current_header = match self.find_selected_parent_header_at_level(¤t_header, level) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + // We want to give this root a shot if all its past is pruned + is_last_header = true; + break; + } + Err(e) => return Err(e), + }; + } + Ok((current_header.hash, is_last_header)) + } + + /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. + fn chain_up_to_depth( + &self, + ghostdag_store: &impl GhostdagStoreReader, + high: Hash, + depth: u64, + ) -> Result, PruningProofManagerInternalError> { + let high_gd = ghostdag_store + .get_compact_data(high) + .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; + let mut current_gd = high_gd; + let mut current = high; + let mut res = vec![current]; + while current_gd.blue_score + depth >= high_gd.blue_score { + if current_gd.selected_parent.is_origin() { + break; + } + let prev = current; + current = current_gd.selected_parent; + res.push(current); + current_gd = ghostdag_store.get_compact_data(current).map_err(|err| { + PruningProofManagerInternalError::BlockAtDepth(format!( + "high: {}, depth: {}, current: {}, high blue score: {}, current blue score: {}, {}", + high, depth, prev, high_gd.blue_score, current_gd.blue_score, err + )) + })?; + } + Ok(res) + } + + fn find_common_ancestor_in_chain_of_a( + &self, + ghostdag_store: &impl GhostdagStoreReader, + a: Hash, + b: Hash, + ) -> Result { + let a_gd = ghostdag_store + .get_compact_data(a) + .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?; + let mut current_gd = a_gd; + let mut current; + let mut loop_counter = 0; + loop { + current = current_gd.selected_parent; + loop_counter += 1; + if current.is_origin() { + break Err(PruningProofManagerInternalError::NoCommonAncestor(format!("a: {a}, b: {b} ({loop_counter} loop steps)"))); + } + if self.reachability_service.is_dag_ancestor_of(current, b) { + break Ok(current); + } + current_gd = ghostdag_store + .get_compact_data(current) + .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?; + } + } +} diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e9690ec38d..2b3ba5f9d8 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1,40 +1,32 @@ +mod apply; +mod build; +mod validate; + use std::{ - cmp::{max, Reverse}, collections::{ - hash_map::Entry::{self, Vacant}, - BinaryHeap, HashSet, VecDeque, - }, - ops::{Deref, DerefMut}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, + hash_map::Entry::{self}, + VecDeque, }, + ops::Deref, + 
sync::{atomic::AtomicBool, Arc}, }; use itertools::Itertools; -use kaspa_math::int::SignedInteger; use parking_lot::{Mutex, RwLock}; use rocksdb::WriteBatch; use kaspa_consensus_core::{ - blockhash::{self, BlockHashExtensions, BlockHashes, ORIGIN}, - errors::{ - consensus::{ConsensusError, ConsensusResult}, - pruning::{PruningImportError, PruningImportResult}, - }, + blockhash::{self, BlockHashExtensions}, + errors::consensus::{ConsensusError, ConsensusResult}, header::Header, pruning::{PruningPointProof, PruningPointTrustedData}, - trusted::{TrustedBlock, TrustedGhostdagData, TrustedHeader}, + trusted::{TrustedGhostdagData, TrustedHeader}, BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; -use kaspa_core::{debug, info, trace}; -use kaspa_database::{ - prelude::{CachePolicy, ConnBuilder, StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}, - utils::DbLifetime, -}; +use kaspa_core::info; +use kaspa_database::{prelude::StoreResultExtensions, utils::DbLifetime}; use kaspa_hashes::Hash; use kaspa_pow::calc_block_level; -use kaspa_utils::{binary_heap::BinaryHeapExtensions, vec::VecExtensions}; use thiserror::Error; use crate::{ @@ -43,35 +35,26 @@ use crate::{ storage::ConsensusStorage, }, model::{ - services::{ - reachability::{MTReachabilityService, ReachabilityService}, - relations::MTRelationsService, - }, + services::{reachability::MTReachabilityService, relations::MTRelationsService}, stores::{ depth::DbDepthStore, - ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, - headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, + ghostdag::{DbGhostdagStore, GhostdagStoreReader}, + headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, pruning::{DbPruningStore, PruningStoreReader}, - reachability::{DbReachabilityStore, ReachabilityStoreReader, StagingReachabilityStore}, - relations::{DbRelationsStore, RelationsStoreReader, StagingRelationsStore}, - selected_chain::{DbSelectedChainStore, SelectedChainStore}, + reachability::DbReachabilityStore, + relations::{DbRelationsStore, RelationsStoreReader}, + selected_chain::DbSelectedChainStore, tips::DbTipsStore, - virtual_state::{VirtualState, VirtualStateStore, VirtualStateStoreReader, VirtualStores}, + virtual_state::{VirtualStateStoreReader, VirtualStores}, DB, }, }, - processes::{ - ghostdag::ordering::SortableBlock, reachability::inquirer as reachability, relations::RelationsStoreExtensions, - window::WindowType, - }, + processes::window::WindowType, }; -use super::{ - ghostdag::{mergeset::unordered_mergeset_without_selected_parent, protocol::GhostdagManager}, - window::WindowManager, -}; +use super::{ghostdag::protocol::GhostdagManager, window::WindowManager}; #[derive(Error, Debug)] enum PruningProofManagerInternalError { @@ -110,39 +93,6 @@ struct TempProofContext { db_lifetime: DbLifetime, } -#[derive(Clone)] -struct RelationsStoreInFutureOfRoot { - relations_store: T, - reachability_service: U, - root: Hash, -} - -impl RelationsStoreReader for RelationsStoreInFutureOfRoot { - fn get_parents(&self, hash: Hash) -> Result { - self.relations_store.get_parents(hash).map(|hashes| { - Arc::new(hashes.iter().copied().filter(|h| self.reachability_service.is_dag_ancestor_of(self.root, *h)).collect_vec()) - }) - } - - fn get_children(&self, hash: Hash) -> StoreResult> { - // We assume hash is in future of root - 
assert!(self.reachability_service.is_dag_ancestor_of(self.root, hash)); - self.relations_store.get_children(hash) - } - - fn has(&self, hash: Hash) -> Result { - if self.reachability_service.is_dag_ancestor_of(self.root, hash) { - Ok(false) - } else { - self.relations_store.has(hash) - } - } - - fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> { - unimplemented!() - } -} - pub struct PruningProofManager { db: Arc, @@ -241,10 +191,7 @@ impl PruningProofManager { continue; } - let state = kaspa_pow::State::new(header); - let (_, pow) = state.check_pow(header.nonce); - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - let block_level = max(signed_block_level, 0) as BlockLevel; + let block_level = calc_block_level(header, self.max_block_level); self.headers_store.insert(header.hash, header.clone(), block_level).unwrap(); } @@ -259,949 +206,14 @@ impl PruningProofManager { drop(pruning_point_write); } - pub fn apply_proof(&self, mut proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { - let pruning_point_header = proof[0].last().unwrap().clone(); - let pruning_point = pruning_point_header.hash; - - // Create a copy of the proof, since we're going to be mutating the proof passed to us - let proof_sets = (0..=self.max_block_level) - .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash))) - .collect_vec(); - - let mut trusted_gd_map: BlockHashMap = BlockHashMap::new(); - for tb in trusted_set.iter() { - trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into()); - let tb_block_level = calc_block_level(&tb.block.header, self.max_block_level); - - (0..=tb_block_level).for_each(|current_proof_level| { - // If this block was in the original proof, ignore it - if proof_sets[current_proof_level as usize].contains(&tb.block.hash()) { - return; - } - - proof[current_proof_level as usize].push(tb.block.header.clone()); - }); - } - - proof.iter_mut().for_each(|level_proof| { - level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); - }); - - self.populate_reachability_and_headers(&proof); - - { - let reachability_read = self.reachability_store.read(); - for tb in trusted_set.iter() { - // Header-only trusted blocks are expected to be in pruning point past - if tb.block.is_header_only() && !reachability_read.is_dag_ancestor_of(tb.block.hash(), pruning_point) { - return Err(PruningImportError::PruningPointPastMissingReachability(tb.block.hash())); - } - } - } - - for (level, headers) in proof.iter().enumerate() { - trace!("Applying level {} from the pruning point proof", level); - let mut level_ancestors: HashSet = HashSet::new(); - level_ancestors.insert(ORIGIN); - - for header in headers.iter() { - let parents = Arc::new( - self.parents_manager - .parents_at_level(header, level as BlockLevel) - .iter() - .copied() - .filter(|parent| level_ancestors.contains(parent)) - .collect_vec() - .push_if_empty(ORIGIN), - ); - - self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - - if level == 0 { - let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { - gd.clone() - } else { - let calculated_gd = self.ghostdag_manager.ghostdag(&parents); - // Override the ghostdag data with the real blue score and blue work - GhostdagData { - blue_score: header.blue_score, - blue_work: header.blue_work, - selected_parent: calculated_gd.selected_parent, - mergeset_blues: calculated_gd.mergeset_blues, - mergeset_reds: calculated_gd.mergeset_reds, - 
blues_anticone_sizes: calculated_gd.blues_anticone_sizes, - } - }; - self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap(); - } - - level_ancestors.insert(header.hash); - } - } - - let virtual_parents = vec![pruning_point]; - let virtual_state = Arc::new(VirtualState { - parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents), - ..VirtualState::default() - }); - self.virtual_stores.write().state.set(virtual_state).unwrap(); - - let mut batch = WriteBatch::default(); - self.body_tips_store.write().init_batch(&mut batch, &virtual_parents).unwrap(); - self.headers_selected_tip_store - .write() - .set_batch(&mut batch, SortableBlock { hash: pruning_point, blue_work: pruning_point_header.blue_work }) - .unwrap(); - self.selected_chain_store.write().init_with_pruning_point(&mut batch, pruning_point).unwrap(); - self.depth_store.insert_batch(&mut batch, pruning_point, ORIGIN, ORIGIN).unwrap(); - self.db.write(batch).unwrap(); - - Ok(()) - } - + // Used in apply and validate fn estimate_proof_unique_size(&self, proof: &PruningPointProof) -> usize { let approx_history_size = proof[0][0].daa_score; let approx_unique_full_levels = f64::log2(approx_history_size as f64 / self.pruning_proof_m as f64).max(0f64) as usize; proof.iter().map(|l| l.len()).sum::().min((approx_unique_full_levels + 1) * self.pruning_proof_m as usize) } - pub fn populate_reachability_and_headers(&self, proof: &PruningPointProof) { - let capacity_estimate = self.estimate_proof_unique_size(proof); - let mut dag = BlockHashMap::with_capacity(capacity_estimate); - let mut up_heap = BinaryHeap::with_capacity(capacity_estimate); - for header in proof.iter().flatten().cloned() { - if let Vacant(e) = dag.entry(header.hash) { - let state = kaspa_pow::State::new(&header); - let (_, pow) = state.check_pow(header.nonce); // TODO: Check if pow passes - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - let block_level = max(signed_block_level, 0) as BlockLevel; - self.headers_store.insert(header.hash, header.clone(), block_level).unwrap(); - - let mut parents = BlockHashSet::with_capacity(header.direct_parents().len() * 2); - // We collect all available parent relations in order to maximize reachability information. - // By taking into account parents from all levels we ensure that the induced DAG has valid - // reachability information for each level-specific sub-DAG -- hence a single reachability - // oracle can serve them all - for level in 0..=self.max_block_level { - for parent in self.parents_manager.parents_at_level(&header, level) { - parents.insert(*parent); - } - } - - struct DagEntry { - header: Arc
, - parents: Arc, - } - - up_heap.push(Reverse(SortableBlock { hash: header.hash, blue_work: header.blue_work })); - e.insert(DagEntry { header, parents: Arc::new(parents) }); - } - } - - debug!("Estimated proof size: {}, actual size: {}", capacity_estimate, dag.len()); - - for reverse_sortable_block in up_heap.into_sorted_iter() { - // TODO: Convert to into_iter_sorted once it gets stable - let hash = reverse_sortable_block.0.hash; - let dag_entry = dag.get(&hash).unwrap(); - - // Filter only existing parents - let parents_in_dag = BinaryHeap::from_iter( - dag_entry - .parents - .iter() - .cloned() - .filter(|parent| dag.contains_key(parent)) - .map(|parent| SortableBlock { hash: parent, blue_work: dag.get(&parent).unwrap().header.blue_work }), - ); - - let reachability_read = self.reachability_store.upgradable_read(); - - // Find the maximal parent antichain from the possibly redundant set of existing parents - let mut reachability_parents: Vec = Vec::new(); - for parent in parents_in_dag.into_sorted_iter() { - if reachability_read.is_dag_ancestor_of_any(parent.hash, &mut reachability_parents.iter().map(|parent| parent.hash)) { - continue; - } - - reachability_parents.push(parent); - } - let reachability_parents_hashes = - BlockHashes::new(reachability_parents.iter().map(|parent| parent.hash).collect_vec().push_if_empty(ORIGIN)); - let selected_parent = reachability_parents.iter().max().map(|parent| parent.hash).unwrap_or(ORIGIN); - - // Prepare batch - let mut batch = WriteBatch::default(); - let mut reachability_relations_write = self.reachability_relations_store.write(); - let mut staging_reachability = StagingReachabilityStore::new(reachability_read); - let mut staging_reachability_relations = StagingRelationsStore::new(&mut reachability_relations_write); - - // Stage - staging_reachability_relations.insert(hash, reachability_parents_hashes.clone()).unwrap(); - let mergeset = unordered_mergeset_without_selected_parent( - &staging_reachability_relations, - &staging_reachability, - selected_parent, - &reachability_parents_hashes, - ); - reachability::add_block(&mut staging_reachability, hash, selected_parent, &mut mergeset.iter().copied()).unwrap(); - - // Commit - let reachability_write = staging_reachability.commit(&mut batch).unwrap(); - staging_reachability_relations.commit(&mut batch).unwrap(); - - // Write - self.db.write(batch).unwrap(); - - // Drop - drop(reachability_write); - drop(reachability_relations_write); - } - } - - fn init_validate_pruning_point_proof_stores_and_processes( - &self, - proof: &PruningPointProof, - ) -> PruningImportResult { - if proof[0].is_empty() { - return Err(PruningImportError::PruningProofNotEnoughHeaders); - } - - let headers_estimate = self.estimate_proof_unique_size(proof); - - let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); - let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); - let headers_store = - Arc::new(DbHeadersStore::new(db.clone(), CachePolicy::Count(headers_estimate), CachePolicy::Count(headers_estimate))); - let ghostdag_stores = (0..=self.max_block_level) - .map(|level| Arc::new(DbGhostdagStore::new(db.clone(), level, cache_policy, cache_policy))) - .collect_vec(); - let mut relations_stores = - (0..=self.max_block_level).map(|level| DbRelationsStore::new(db.clone(), level, cache_policy, cache_policy)).collect_vec(); - let reachability_stores = (0..=self.max_block_level) - .map(|level| Arc::new(RwLock::new(DbReachabilityStore::with_block_level(db.clone(), 
cache_policy, cache_policy, level)))) - .collect_vec(); - - let reachability_services = (0..=self.max_block_level) - .map(|level| MTReachabilityService::new(reachability_stores[level as usize].clone())) - .collect_vec(); - - let ghostdag_managers = ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - self.genesis_hash, - self.ghostdag_k, - ghostdag_store, - relations_stores[level].clone(), - headers_store.clone(), - reachability_services[level].clone(), - level != 0, - ) - }) - .collect_vec(); - - { - let mut batch = WriteBatch::default(); - for level in 0..=self.max_block_level { - let level = level as usize; - reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); - relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); - ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); - } - - db.write(batch).unwrap(); - } - - Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, relations_stores, reachability_stores, ghostdag_managers }) - } - - fn populate_stores_for_validate_pruning_point_proof( - &self, - proof: &PruningPointProof, - ctx: &mut TempProofContext, - log_validating: bool, - ) -> PruningImportResult> { - let headers_store = &ctx.headers_store; - let ghostdag_stores = &ctx.ghostdag_stores; - let mut relations_stores = ctx.relations_stores.clone(); - let reachability_stores = &ctx.reachability_stores; - let ghostdag_managers = &ctx.ghostdag_managers; - - let proof_pp_header = proof[0].last().expect("checked if empty"); - let proof_pp = proof_pp_header.hash; - - let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; - for level in (0..=self.max_block_level).rev() { - // Before processing this level, check if the process is exiting so we can end early - if self.is_consensus_exiting.load(Ordering::Relaxed) { - return Err(PruningImportError::PruningValidationInterrupted); - } - - if log_validating { - info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); - } - let level_idx = level as usize; - let mut selected_tip = None; - for (i, header) in proof[level as usize].iter().enumerate() { - let header_level = calc_block_level(header, self.max_block_level); - if header_level < level { - return Err(PruningImportError::PruningProofWrongBlockLevel(header.hash, header_level, level)); - } - - headers_store.insert(header.hash, header.clone(), header_level).unwrap_or_exists(); - - let parents = self - .parents_manager - .parents_at_level(header, level) - .iter() - .copied() - .filter(|parent| ghostdag_stores[level_idx].has(*parent).unwrap()) - .collect_vec(); - - // Only the first block at each level is allowed to have no known parents - if parents.is_empty() && i != 0 { - return Err(PruningImportError::PruningProofHeaderWithNoKnownParents(header.hash, level)); - } - - let parents: BlockHashes = parents.push_if_empty(ORIGIN).into(); - - if relations_stores[level_idx].has(header.hash).unwrap() { - return Err(PruningImportError::PruningProofDuplicateHeaderAtLevel(header.hash, level)); - } - - relations_stores[level_idx].insert(header.hash, parents.clone()).unwrap(); - let ghostdag_data = Arc::new(ghostdag_managers[level_idx].ghostdag(&parents)); - ghostdag_stores[level_idx].insert(header.hash, ghostdag_data.clone()).unwrap(); - selected_tip = Some(match selected_tip { - Some(tip) => ghostdag_managers[level_idx].find_selected_parent([tip, header.hash]), - 
None => header.hash, - }); - - let mut reachability_mergeset = { - let reachability_read = reachability_stores[level_idx].read(); - ghostdag_data - .unordered_mergeset_without_selected_parent() - .filter(|hash| reachability_read.has(*hash).unwrap()) - .collect_vec() // We collect to vector so reachability_read can be released and let `reachability::add_block` use a write lock. - .into_iter() - }; - reachability::add_block( - reachability_stores[level_idx].write().deref_mut(), - header.hash, - ghostdag_data.selected_parent, - &mut reachability_mergeset, - ) - .unwrap(); - - if selected_tip.unwrap() == header.hash { - reachability::hint_virtual_selected_parent(reachability_stores[level_idx].write().deref_mut(), header.hash) - .unwrap(); - } - } - - if level < self.max_block_level { - let block_at_depth_m_at_next_level = self - .block_at_depth( - &*ghostdag_stores[level_idx + 1], - selected_tip_by_level[level_idx + 1].unwrap(), - self.pruning_proof_m, - ) - .unwrap(); - if !relations_stores[level_idx].has(block_at_depth_m_at_next_level).unwrap() { - return Err(PruningImportError::PruningProofMissingBlockAtDepthMFromNextLevel(level, level + 1)); - } - } - - if selected_tip.unwrap() != proof_pp - && !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip.unwrap()) - { - return Err(PruningImportError::PruningProofMissesBlocksBelowPruningPoint(selected_tip.unwrap(), level)); - } - - selected_tip_by_level[level_idx] = selected_tip; - } - - Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) - } - - fn validate_proof_selected_tip( - &self, - proof_selected_tip: Hash, - level: BlockLevel, - proof_pp_level: BlockLevel, - proof_pp: Hash, - proof_pp_header: &Header, - ) -> PruningImportResult<()> { - // A proof selected tip of some level has to be the proof suggested prunint point itself if its level - // is lower or equal to the pruning point level, or a parent of the pruning point on the relevant level - // otherwise. - if level <= proof_pp_level { - if proof_selected_tip != proof_pp { - return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); - } - } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { - return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); - } - - Ok(()) - } - - // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple - // that contains the ghostdag data of the proof and current consensus common ancestor. If no - // such ancestor exists, it returns None. 
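
The walk described in the comment above descends the proof's selected-parent chain until it reaches a block the current consensus also knows, or gives up at the origin sentinel. A minimal self-contained sketch of that loop (u64 hashes and HashMap stand-ins for the ghostdag stores; `Gd` is a stand-in for CompactGhostdagData, not the crate's type):

    use std::collections::HashMap;

    #[derive(Clone, Copy)]
    struct Gd {
        selected_parent: u64, // hash stand-in
        blue_work: u128,
    }

    const ORIGIN: u64 = 0; // stand-in for the origin sentinel

    /// Walk the proof's selected-parent chain from `proof_tip` down; the first
    /// block also present in `consensus` is the common ancestor. Returns the
    /// ancestor's ghostdag data as seen by each side, or None if the chain
    /// bottoms out at origin without meeting consensus.
    fn common_ancestor_gd(
        proof: &HashMap<u64, Gd>,
        consensus: &HashMap<u64, Gd>,
        proof_tip: u64,
    ) -> Option<(Gd, Gd)> {
        let mut current = proof_tip;
        let mut current_gd = *proof.get(&current)?;
        loop {
            if let Some(consensus_gd) = consensus.get(&current) {
                return Some((current_gd, *consensus_gd));
            }
            current = current_gd.selected_parent;
            if current == ORIGIN {
                return None; // the chains share no block
            }
            current_gd = *proof.get(&current)?;
        }
    }
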
- fn find_proof_and_consensus_common_ancestor_ghostdag_data( - &self, - proof_ghostdag_stores: &[Arc], - current_consensus_ghostdag_stores: &[Arc], - proof_selected_tip: Hash, - level: BlockLevel, - proof_selected_tip_gd: CompactGhostdagData, - ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { - let mut proof_current = proof_selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - loop { - match current_consensus_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = proof_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); - } - }; - } - } - - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { - if proof.len() != self.max_block_level as usize + 1 { - return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); - } - - // Initialize the stores for the proof - let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; - let proof_pp_header = proof[0].last().expect("checked if empty"); - let proof_pp = proof_pp_header.hash; - let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let proof_selected_tip_by_level = - self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; - let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; - - // Get the proof for the current consensus and recreate the stores for it - // This is expected to be fast because if a proof exists, it will be cached. - // If no proof exists, this is empty - let mut current_consensus_proof = self.get_pruning_point_proof(); - if current_consensus_proof.is_empty() { - // An empty proof can only happen if we're at genesis. 
We're going to create a proof for this case that contains the genesis header only - let genesis_header = self.headers_store.get_header(self.genesis_hash).unwrap(); - current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); - } - let mut current_consensus_stores_and_processes = - self.init_validate_pruning_point_proof_stores_and_processes(¤t_consensus_proof)?; - let _ = self.populate_stores_for_validate_pruning_point_proof( - ¤t_consensus_proof, - &mut current_consensus_stores_and_processes, - false, - )?; - let current_consensus_ghostdag_stores = current_consensus_stores_and_processes.ghostdag_stores; - - let pruning_read = self.pruning_point_store.read(); - let relations_read = self.relations_stores.read(); - let current_pp = pruning_read.get().unwrap().pruning_point; - let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - - for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { - let level = level_idx as BlockLevel; - self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; - - let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); - - // Next check is to see if this proof is "better" than what's in the current consensus - // Step 1 - look at only levels that have a full proof (least 2m blocks in the proof) - if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { - continue; - } - - // Step 2 - if we can find a common ancestor between the proof and current consensus - // we can determine if the proof is better. The proof is better if the blue work* difference between the - // old current consensus's tips and the common ancestor is less than the blue work difference between the - // proof's tip and the common ancestor. - // *Note: blue work is the same as blue score on levels higher than 0 - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( - &proof_ghostdag_stores, - ¤t_consensus_ghostdag_stores, - selected_tip, - level, - proof_selected_tip_gd, - ) { - let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { - let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = - SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); - if parent_blue_work_diff >= selected_tip_blue_work_diff { - return Err(PruningImportError::PruningProofInsufficientBlueWork); - } - } - - return Ok(()); - } - } - - if current_pp == self.genesis_hash { - // If the proof has better tips and the current pruning point is still - // genesis, we consider the proof state to be better. - return Ok(()); - } - - // If we got here it means there's no level with shared blocks - // between the proof and the current consensus. In this case we - // consider the proof to be better if it has at least one level - // with 2*self.pruning_proof_m blue blocks where consensus doesn't. 
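
Step 2 above compares the work each side gained since the common ancestor, and the differences are taken with signed arithmetic because a parent's blue work can sit below the ancestor's after a reorg. A toy version of that criterion, using i128 over plain u64 work values in place of the node's SignedInteger over wide unsigned integers:

    /// The proof wins only if every parent of the current pruning point at this
    /// level gained strictly less blue work since the common ancestor than the
    /// proof's selected tip did.
    fn proof_has_more_work(
        proof_tip_work: u64,
        proof_ancestor_work: u64,
        consensus_parent_works: &[u64],
        consensus_ancestor_work: u64,
    ) -> bool {
        let proof_diff = proof_tip_work as i128 - proof_ancestor_work as i128;
        consensus_parent_works
            .iter()
            .all(|&w| (w as i128 - consensus_ancestor_work as i128) < proof_diff)
    }
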
- for level in (0..=self.max_block_level).rev() { - let level_idx = level as usize; - - let proof_selected_tip = proof_selected_tip_by_level[level_idx]; - let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); - if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { - continue; - } - - match relations_read[level_idx].get_parents(current_pp).unwrap_option() { - Some(parents) => { - if parents.iter().copied().any(|parent| { - current_consensus_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m - }) { - return Ok(()); - } - } - None => { - // If the current pruning point doesn't have a parent at this level, we consider the proof state to be better. - return Ok(()); - } - } - } - - drop(pruning_read); - drop(relations_read); - drop(proof_stores_and_processes.db_lifetime); - drop(current_consensus_stores_and_processes.db_lifetime); - - Err(PruningImportError::PruningProofNotEnoughHeaders) - } - - // The "current dag level" is the level right before the level whose parents are - // not the same as our header's direct parents - // - // Find the current DAG level by going through all the parents at each level, - // starting from the bottom level and see which is the first level that has - // parents that are NOT our current pp_header's direct parents. - fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { - let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); - pp_header - .parents_by_level - .iter() - .enumerate() - .skip(1) - .find_map(|(level, parents)| { - if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { - None - } else { - Some((level - 1) as BlockLevel) - } - }) - .unwrap_or(self.max_block_level) - } - - fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { - level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth) - } - - /// selected parent at level = the parent of the header at the level - /// with the highest blue_work - fn find_selected_parent_header_at_level( - &self, - header: &Header, - level: BlockLevel, - ) -> PruningProofManagerInternalResult> { - // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important - // to filter to include only parents that are in relations_service. - let sp = self - .parents_manager - .parents_at_level(header, level) - .iter() - .copied() - .filter(|p| self.level_relations_services[level as usize].has(*p).unwrap()) - .filter_map(|p| self.headers_store.get_header(p).unwrap_option().map(|h| SortableBlock::new(p, h.blue_work))) - .max() - .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof("no parents with header".to_string()))?; - Ok(self.headers_store.get_header(sp.hash).expect("unwrapped above")) - } - - /// Find a sufficient root at a given level by going through the headers store and looking - /// for a deep enough level block - /// For each root candidate, fill in the ghostdag data to see if it actually is deep enough. - /// If the root is deep enough, it will satisfy these conditions - /// 1. block at depth 2m at this level ∈ Future(root) - /// 2. 
block at depth m at the next level ∈ Future(root) - /// - /// Returns: the filled ghostdag store from root to tip, the selected tip and the root - fn find_sufficient_root( - &self, - pp_header: &HeaderWithBlockLevel, - level: BlockLevel, - current_dag_level: BlockLevel, - required_block: Option, - temp_db: Arc, - ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { - // Step 1: Determine which selected tip to use - let selected_tip = if pp_header.block_level >= level { - pp_header.header.hash - } else { - self.find_selected_parent_header_at_level(&pp_header.header, level)?.hash - }; - - let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); - let required_level_depth = 2 * self.pruning_proof_m; - - // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from. - // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level - let mut required_base_level_depth = self.estimated_blue_depth_at_level_0( - level, - required_level_depth + 100, // We take a safety margin - current_dag_level, - ); - - let mut is_last_level_header; - let mut tries = 0; - - let block_at_depth_m_at_next_level = required_block.unwrap_or(selected_tip); - - loop { - // Step 2 - Find a deep enough root candidate - let block_at_depth_2m = match self.level_block_at_base_depth(level, selected_tip, required_base_level_depth) { - Ok((header, is_last_header)) => { - is_last_level_header = is_last_header; - header - } - Err(e) => return Err(e), - }; - - let root = if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) { - block_at_depth_2m - } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { - block_at_depth_m_at_next_level - } else { - // find common ancestor of block_at_depth_m_at_next_level and block_at_depth_2m in chain of block_at_depth_m_at_next_level - let mut common_ancestor = self.headers_store.get_header(block_at_depth_m_at_next_level).unwrap(); - - while !self.reachability_service.is_dag_ancestor_of(common_ancestor.hash, block_at_depth_2m) { - common_ancestor = match self.find_selected_parent_header_at_level(&common_ancestor, level) { - Ok(header) => header, - // Try to give this last header a chance at being root - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => break, - Err(e) => return Err(e), - }; - } - - common_ancestor.hash - }; - - if level == 0 { - return Ok((self.ghostdag_store.clone(), selected_tip, root)); - } - - // Step 3 - Fill the ghostdag data from root to tip - let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); - let has_required_block = self.fill_level_proof_ghostdag_data( - root, - pp_header.header.hash, - &ghostdag_store, - Some(block_at_depth_m_at_next_level), - level, - ); - - // Step 4 - Check if we actually have enough depth. - // Need to ensure this does the same 2M+1 depth that block_at_depth does - if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) - { - break Ok((ghostdag_store, selected_tip, root)); - } - - tries += 1; - if is_last_level_header { - if has_required_block { - // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place. 
- // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to - // try to find 2500 depth worth of headers at a level, but the proof only contains about 2000 headers. To be able to sync - // with such an older node. As long as we found the required block, we can still proceed. - debug!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned. Required block found so trying anyway."); - break Ok((ghostdag_store, selected_tip, root)); - } else { - panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned"); - } - } - - // If we don't have enough depth now, we need to look deeper - required_base_level_depth = (required_base_level_depth as f64 * 1.1) as u64; - debug!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_base_level_depth}"); - } - } - - fn calc_gd_for_all_levels( - &self, - pp_header: &HeaderWithBlockLevel, - temp_db: Arc, - ) -> (Vec>, Vec, Vec) { - let current_dag_level = self.find_current_dag_level(&pp_header.header); - let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; - let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; - let mut root_by_level = vec![None; self.max_block_level as usize + 1]; - for level in (0..=self.max_block_level).rev() { - let level_usize = level as usize; - let required_block = if level != self.max_block_level { - let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone(); - let block_at_depth_m_at_next_level = self - .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m) - .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) - .unwrap(); - Some(block_at_depth_m_at_next_level) - } else { - None - }; - let (store, selected_tip, root) = self - .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) - .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); - ghostdag_stores[level_usize] = Some(store); - selected_tip_by_level[level_usize] = Some(selected_tip); - root_by_level[level_usize] = Some(root); - } - - ( - ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(), - selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(), - root_by_level.into_iter().map(Option::unwrap).collect_vec(), - ) - } - - pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { - if pp == self.genesis_hash { - return vec![]; - } - - let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); - let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); - let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); - - (0..=self.max_block_level) - .map(|level| { - let level = level as usize; - let selected_tip = selected_tip_by_level[level]; - let block_at_depth_2m = self - .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) - .map_err(|err| format!("level: {}, err: {}", level, err)) - .unwrap(); - - // TODO (relaxed): remove the assertion below - // (New Logic) This is the root we calculated by going through block relations - let root = roots_by_level[level]; - // (Old Logic) This 
is the root we can calculate given that the GD records are already filled - // The root calc logic below is the original logic before the on-demand higher level GD calculation - // We only need old_root to sanity check the new logic - let old_root = if level != self.max_block_level as usize { - let block_at_depth_m_at_next_level = self - .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) - .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) - .unwrap(); - if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { - block_at_depth_m_at_next_level - } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) { - block_at_depth_2m - } else { - self.find_common_ancestor_in_chain_of_a( - &*ghostdag_stores[level], - block_at_depth_m_at_next_level, - block_at_depth_2m, - ) - .map_err(|err| format!("level: {}, err: {}", level, err)) - .unwrap() - } - } else { - block_at_depth_2m - }; - - // new root is expected to be always an ancestor of old_root because new root takes a safety margin - assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); - - let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); - let mut queue = BinaryHeap::>::new(); - let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); - while let Some(current) = queue.pop() { - let current = current.0.hash; - if !visited.insert(current) { - continue; - } - - // The second condition is always expected to be true (ghostdag store will have the entry) - // because we are traversing the exact diamond (future(root) ⋂ past(tip)) for which we calculated - // GD for (see fill_level_proof_ghostdag_data). 
TODO (relaxed): remove the condition or turn into assertion - if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) - || !ghostdag_stores[level].has(current).is_ok_and(|found| found) - { - continue; - } - - headers.push(self.headers_store.get_header(current).unwrap()); - for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); - } - } - - // TODO (relaxed): remove the assertion below - // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof - let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash)); - let chain_2m = self - .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) - .map_err(|err| { - dbg!(level, selected_tip, block_at_depth_2m, root); - format!("Assert 2M chain -- level: {}, err: {}", level, err) - }) - .unwrap(); - let chain_2m_len = chain_2m.len(); - for (i, chain_hash) in chain_2m.into_iter().enumerate() { - if !set.contains(&chain_hash) { - let next_level_tip = selected_tip_by_level[level + 1]; - let next_level_chain_m = - self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); - let next_level_block_m = next_level_chain_m.last().copied().unwrap(); - dbg!(next_level_chain_m.len()); - dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); - dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); - dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); - dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); - dbg!(level, selected_tip, block_at_depth_2m, root); - panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); - } - } - - headers - }) - .collect_vec() - } - - /// BFS forward iterates from root until selected tip, ignoring blocks in the antipast of selected_tip. 
- /// For each block along the way, insert that hash into the ghostdag_store - /// If we have a required_block to find, this will return true if that block was found along the way - fn fill_level_proof_ghostdag_data( - &self, - root: Hash, - selected_tip: Hash, - ghostdag_store: &Arc, - required_block: Option, - level: BlockLevel, - ) -> bool { - let relations_service = RelationsStoreInFutureOfRoot { - relations_store: self.level_relations_services[level as usize].clone(), - reachability_service: self.reachability_service.clone(), - root, - }; - let gd_manager = GhostdagManager::new( - root, - self.ghostdag_k, - ghostdag_store.clone(), - relations_service.clone(), - self.headers_store.clone(), - self.reachability_service.clone(), - level != 0, - ); - - ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); - ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); - - let mut topological_heap: BinaryHeap<_> = Default::default(); - let mut visited = BlockHashSet::new(); - for child in relations_service.get_children(root).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, - })); - } - - let mut has_required_block = required_block.is_some_and(|required_block| root == required_block); - loop { - let Some(current) = topological_heap.pop() else { - break; - }; - let current_hash = current.0.hash; - if !visited.insert(current_hash) { - continue; - } - - if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) { - // We don't care about blocks in the antipast of the selected tip - continue; - } - - if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) { - has_required_block = true; - } - - let current_gd = gd_manager.ghostdag(&relations_service.get_parents(current_hash).unwrap()); - - ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); - - for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, - })); - } - } - - has_required_block - } - - /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. 
- fn chain_up_to_depth( - &self, - ghostdag_store: &impl GhostdagStoreReader, - high: Hash, - depth: u64, - ) -> Result, PruningProofManagerInternalError> { - let high_gd = ghostdag_store - .get_compact_data(high) - .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; - let mut current_gd = high_gd; - let mut current = high; - let mut res = vec![current]; - while current_gd.blue_score + depth >= high_gd.blue_score { - if current_gd.selected_parent.is_origin() { - break; - } - let prev = current; - current = current_gd.selected_parent; - res.push(current); - current_gd = ghostdag_store.get_compact_data(current).map_err(|err| { - PruningProofManagerInternalError::BlockAtDepth(format!( - "high: {}, depth: {}, current: {}, high blue score: {}, current blue score: {}, {}", - high, depth, prev, high_gd.blue_score, current_gd.blue_score, err - )) - })?; - } - Ok(res) - } - + // Used in build and validate fn block_at_depth( &self, ghostdag_store: &impl GhostdagStoreReader, @@ -1229,69 +241,6 @@ impl PruningProofManager { Ok(current) } - /// Finds the block on a given level that is at base_depth deep from it. - /// Also returns if the block was the last one in the level - /// base_depth = the blue score depth at level 0 - fn level_block_at_base_depth( - &self, - level: BlockLevel, - high: Hash, - base_depth: u64, - ) -> PruningProofManagerInternalResult<(Hash, bool)> { - let high_header = self - .headers_store - .get_header(high) - .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {base_depth}, {err}")))?; - let high_header_score = high_header.blue_score; - let mut current_header = high_header; - - let mut is_last_header = false; - - while current_header.blue_score + base_depth >= high_header_score { - if current_header.direct_parents().is_empty() { - break; - } - - current_header = match self.find_selected_parent_header_at_level(¤t_header, level) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - // We want to give this root a shot if all its past is pruned - is_last_header = true; - break; - } - Err(e) => return Err(e), - }; - } - Ok((current_header.hash, is_last_header)) - } - - fn find_common_ancestor_in_chain_of_a( - &self, - ghostdag_store: &impl GhostdagStoreReader, - a: Hash, - b: Hash, - ) -> Result { - let a_gd = ghostdag_store - .get_compact_data(a) - .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?; - let mut current_gd = a_gd; - let mut current; - let mut loop_counter = 0; - loop { - current = current_gd.selected_parent; - loop_counter += 1; - if current.is_origin() { - break Err(PruningProofManagerInternalError::NoCommonAncestor(format!("a: {a}, b: {b} ({loop_counter} loop steps)"))); - } - if self.reachability_service.is_dag_ancestor_of(current, b) { - break Ok(current); - } - current_gd = ghostdag_store - .get_compact_data(current) - .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?; - } - } - /// Returns the k + 1 chain blocks below this hash (inclusive). If data is missing /// the search is halted and a partial chain is returned. 
/// diff --git a/consensus/src/processes/pruning_proof/validate.rs b/consensus/src/processes/pruning_proof/validate.rs new file mode 100644 index 0000000000..63650cdc53 --- /dev/null +++ b/consensus/src/processes/pruning_proof/validate.rs @@ -0,0 +1,376 @@ +use std::{ + ops::DerefMut, + sync::{atomic::Ordering, Arc}, +}; + +use itertools::Itertools; +use kaspa_consensus_core::{ + blockhash::{BlockHashExtensions, BlockHashes, ORIGIN}, + errors::pruning::{PruningImportError, PruningImportResult}, + header::Header, + pruning::PruningPointProof, + BlockLevel, +}; +use kaspa_core::info; +use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; +use kaspa_hashes::Hash; +use kaspa_math::int::SignedInteger; +use kaspa_pow::calc_block_level; +use kaspa_utils::vec::VecExtensions; +use parking_lot::lock_api::RwLock; +use rocksdb::WriteBatch; + +use crate::{ + model::{ + services::reachability::MTReachabilityService, + stores::{ + ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagStore, GhostdagStoreReader}, + headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, + pruning::PruningStoreReader, + reachability::{DbReachabilityStore, ReachabilityStoreReader}, + relations::{DbRelationsStore, RelationsStoreReader}, + }, + }, + processes::{ghostdag::protocol::GhostdagManager, reachability::inquirer as reachability, relations::RelationsStoreExtensions}, +}; + +use super::{PruningProofManager, TempProofContext}; + +impl PruningProofManager { + pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + if proof.len() != self.max_block_level as usize + 1 { + return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); + } + + // Initialize the stores for the proof + let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let proof_selected_tip_by_level = + self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; + let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; + + // Get the proof for the current consensus and recreate the stores for it + // This is expected to be fast because if a proof exists, it will be cached. + // If no proof exists, this is empty + let mut current_consensus_proof = self.get_pruning_point_proof(); + if current_consensus_proof.is_empty() { + // An empty proof can only happen if we're at genesis. 
We're going to create a proof for this case that contains the genesis header only + let genesis_header = self.headers_store.get_header(self.genesis_hash).unwrap(); + current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); + } + let mut current_consensus_stores_and_processes = + self.init_validate_pruning_point_proof_stores_and_processes(&current_consensus_proof)?; + let _ = self.populate_stores_for_validate_pruning_point_proof( + &current_consensus_proof, + &mut current_consensus_stores_and_processes, + false, + )?; + let current_consensus_ghostdag_stores = current_consensus_stores_and_processes.ghostdag_stores; + + let pruning_read = self.pruning_point_store.read(); + let relations_read = self.relations_stores.read(); + let current_pp = pruning_read.get().unwrap().pruning_point; + let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); + + for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { + let level = level_idx as BlockLevel; + self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; + + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + + // Next check is to see if this proof is "better" than what's in the current consensus + // Step 1 - look at only levels that have a full proof (at least 2m blocks in the proof) + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + + // Step 2 - if we can find a common ancestor between the proof and current consensus + // we can determine if the proof is better. The proof is better if the blue work* difference between the + // old current consensus's tips and the common ancestor is less than the blue work difference between the + // proof's tip and the common ancestor. + // *Note: blue work is the same as blue score on levels higher than 0 + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &proof_ghostdag_stores, + &current_consensus_ghostdag_stores, + selected_tip, + level, + proof_selected_tip_gd, + ) { + let selected_tip_blue_work_diff = + SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); + for parent in self.parents_manager.parents_at_level(&current_pp_header, level).iter().copied() { + let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work_diff = + SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); + if parent_blue_work_diff >= selected_tip_blue_work_diff { + return Err(PruningImportError::PruningProofInsufficientBlueWork); + } + } + + return Ok(()); + } + } + + if current_pp == self.genesis_hash { + // If the proof has better tips and the current pruning point is still + // genesis, we consider the proof state to be better. + return Ok(()); + } + + // If we got here it means there's no level with shared blocks + // between the proof and the current consensus. In this case we + // consider the proof to be better if it has at least one level + // with 2*self.pruning_proof_m blue blocks where consensus doesn't.
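The blue-work comparison above condenses to a simple rule: at a level with a full proof, the proof wins only if its blue-work gain over the common ancestor strictly exceeds the gain of every parent of the current pruning point at that level. A minimal standalone sketch of that rule, using u128 as a stand-in for the Uint192 blue-work type (all names here are illustrative, not part of the patch):

    // Stand-in for kaspa's Uint192 blue-work type.
    type BlueWork = u128;

    /// Returns true if the proof's blue-work gain over the common ancestor
    /// strictly exceeds the gain of every consensus-side parent, mirroring the
    /// check above (which errors when any parent diff >= the proof diff).
    fn proof_is_better(
        proof_tip_work: BlueWork,
        proof_ancestor_work: BlueWork,
        consensus_parent_works: &[BlueWork],
        consensus_ancestor_work: BlueWork,
    ) -> bool {
        let proof_diff = proof_tip_work.saturating_sub(proof_ancestor_work);
        consensus_parent_works.iter().all(|&w| w.saturating_sub(consensus_ancestor_work) < proof_diff)
    }

    fn main() {
        // The proof gained 120 units of blue work past the ancestor; the
        // consensus parents gained at most 100, so the proof is better.
        assert!(proof_is_better(220, 100, &[180, 200], 100));
    }

(The patch itself computes the diffs with SignedInteger; saturating subtraction here is just a compact way to express the same comparison when work never falls below the ancestor's.)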
+ for level in (0..=self.max_block_level).rev() { + let level_idx = level as usize; + + let proof_selected_tip = proof_selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + + match relations_read[level_idx].get_parents(current_pp).unwrap_option() { + Some(parents) => { + if parents.iter().copied().any(|parent| { + current_consensus_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m + }) { + return Ok(()); + } + } + None => { + // If the current pruning point doesn't have a parent at this level, we consider the proof state to be better. + return Ok(()); + } + } + } + + drop(pruning_read); + drop(relations_read); + drop(proof_stores_and_processes.db_lifetime); + drop(current_consensus_stores_and_processes.db_lifetime); + + Err(PruningImportError::PruningProofNotEnoughHeaders) + } + + fn init_validate_pruning_point_proof_stores_and_processes( + &self, + proof: &PruningPointProof, + ) -> PruningImportResult<TempProofContext> { + if proof[0].is_empty() { + return Err(PruningImportError::PruningProofNotEnoughHeaders); + } + + let headers_estimate = self.estimate_proof_unique_size(proof); + + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); + let headers_store = + Arc::new(DbHeadersStore::new(db.clone(), CachePolicy::Count(headers_estimate), CachePolicy::Count(headers_estimate))); + let ghostdag_stores = (0..=self.max_block_level) + .map(|level| Arc::new(DbGhostdagStore::new(db.clone(), level, cache_policy, cache_policy))) + .collect_vec(); + let mut relations_stores = + (0..=self.max_block_level).map(|level| DbRelationsStore::new(db.clone(), level, cache_policy, cache_policy)).collect_vec(); + let reachability_stores = (0..=self.max_block_level) + .map(|level| Arc::new(RwLock::new(DbReachabilityStore::with_block_level(db.clone(), cache_policy, cache_policy, level)))) + .collect_vec(); + + let reachability_services = (0..=self.max_block_level) + .map(|level| MTReachabilityService::new(reachability_stores[level as usize].clone())) + .collect_vec(); + + let ghostdag_managers = ghostdag_stores + .iter() + .cloned() + .enumerate() + .map(|(level, ghostdag_store)| { + GhostdagManager::new( + self.genesis_hash, + self.ghostdag_k, + ghostdag_store, + relations_stores[level].clone(), + headers_store.clone(), + reachability_services[level].clone(), + level != 0, + ) + }) + .collect_vec(); + + { + let mut batch = WriteBatch::default(); + for level in 0..=self.max_block_level { + let level = level as usize; + reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); + relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); + ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + } + + db.write(batch).unwrap(); + } + + Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, relations_stores, reachability_stores, ghostdag_managers }) + } + + fn populate_stores_for_validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + ctx: &mut TempProofContext, + log_validating: bool, + ) -> PruningImportResult<Vec<Hash>> { + let headers_store = &ctx.headers_store; + let ghostdag_stores = &ctx.ghostdag_stores; + let mut relations_stores = ctx.relations_stores.clone(); + let reachability_stores =
&ctx.reachability_stores; + let ghostdag_managers = &ctx.ghostdag_managers; + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; + for level in (0..=self.max_block_level).rev() { + // Before processing this level, check if the process is exiting so we can end early + if self.is_consensus_exiting.load(Ordering::Relaxed) { + return Err(PruningImportError::PruningValidationInterrupted); + } + + if log_validating { + info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + } + let level_idx = level as usize; + let mut selected_tip = None; + for (i, header) in proof[level as usize].iter().enumerate() { + let header_level = calc_block_level(header, self.max_block_level); + if header_level < level { + return Err(PruningImportError::PruningProofWrongBlockLevel(header.hash, header_level, level)); + } + + headers_store.insert(header.hash, header.clone(), header_level).unwrap_or_exists(); + + let parents = self + .parents_manager + .parents_at_level(header, level) + .iter() + .copied() + .filter(|parent| ghostdag_stores[level_idx].has(*parent).unwrap()) + .collect_vec(); + + // Only the first block at each level is allowed to have no known parents + if parents.is_empty() && i != 0 { + return Err(PruningImportError::PruningProofHeaderWithNoKnownParents(header.hash, level)); + } + + let parents: BlockHashes = parents.push_if_empty(ORIGIN).into(); + + if relations_stores[level_idx].has(header.hash).unwrap() { + return Err(PruningImportError::PruningProofDuplicateHeaderAtLevel(header.hash, level)); + } + + relations_stores[level_idx].insert(header.hash, parents.clone()).unwrap(); + let ghostdag_data = Arc::new(ghostdag_managers[level_idx].ghostdag(&parents)); + ghostdag_stores[level_idx].insert(header.hash, ghostdag_data.clone()).unwrap(); + selected_tip = Some(match selected_tip { + Some(tip) => ghostdag_managers[level_idx].find_selected_parent([tip, header.hash]), + None => header.hash, + }); + + let mut reachability_mergeset = { + let reachability_read = reachability_stores[level_idx].read(); + ghostdag_data + .unordered_mergeset_without_selected_parent() + .filter(|hash| reachability_read.has(*hash).unwrap()) + .collect_vec() // We collect to vector so reachability_read can be released and let `reachability::add_block` use a write lock. 
+ .into_iter() + }; + reachability::add_block( + reachability_stores[level_idx].write().deref_mut(), + header.hash, + ghostdag_data.selected_parent, + &mut reachability_mergeset, + ) + .unwrap(); + + if selected_tip.unwrap() == header.hash { + reachability::hint_virtual_selected_parent(reachability_stores[level_idx].write().deref_mut(), header.hash) + .unwrap(); + } + } + + if level < self.max_block_level { + let block_at_depth_m_at_next_level = self + .block_at_depth( + &*ghostdag_stores[level_idx + 1], + selected_tip_by_level[level_idx + 1].unwrap(), + self.pruning_proof_m, + ) + .unwrap(); + if !relations_stores[level_idx].has(block_at_depth_m_at_next_level).unwrap() { + return Err(PruningImportError::PruningProofMissingBlockAtDepthMFromNextLevel(level, level + 1)); + } + } + + if selected_tip.unwrap() != proof_pp + && !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip.unwrap()) + { + return Err(PruningImportError::PruningProofMissesBlocksBelowPruningPoint(selected_tip.unwrap(), level)); + } + + selected_tip_by_level[level_idx] = selected_tip; + } + + Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) + } + + fn validate_proof_selected_tip( + &self, + proof_selected_tip: Hash, + level: BlockLevel, + proof_pp_level: BlockLevel, + proof_pp: Hash, + proof_pp_header: &Header, + ) -> PruningImportResult<()> { + // A proof selected tip of some level has to be the proof suggested pruning point itself if its level + // is lower than or equal to the pruning point level, or a parent of the pruning point on the relevant level + // otherwise. + if level <= proof_pp_level { + if proof_selected_tip != proof_pp { + return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); + } + } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { + return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); + } + + Ok(()) + } + + // find_proof_and_consensus_common_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. If no + // such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( + &self, + proof_ghostdag_stores: &[Arc<DbGhostdagStore>], + current_consensus_ghostdag_stores: &[Arc<DbGhostdagStore>], + proof_selected_tip: Hash, + level: BlockLevel, + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match current_consensus_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = proof_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; + } + } +} From b516ac645c43bc7dbf70ea9ed17bc0c3d482ef47 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sun, 10 Nov 2024 06:39:46 -0700 Subject: [PATCH 22/31] Pruning proof minor improvements (#590) * Check pow for headers in level proof * Implement comparable level work * Implement fairer pruning proof comparison * prefer having the GD manager compose the level target, so that 1.
level_work is always used 2. level zero can be explicitly set to 0 by the manager itself (being consensus sensitive code) * 1. no need to init origin here 2. comments about blue work are obvious * use saturating ops and avoid SignedInteger all together * Comment on level_work * Move MAX_WORK_LEVEL close to BlueWorkType and explain * Refactor block level calc from pow to a function --------- Co-authored-by: Michael Sutton --- consensus/core/src/api/mod.rs | 4 +- consensus/core/src/errors/pruning.rs | 3 + consensus/core/src/lib.rs | 4 ++ consensus/core/src/pruning.rs | 13 ++++ consensus/pow/src/lib.rs | 14 +++- consensus/src/consensus/mod.rs | 10 ++- consensus/src/consensus/services.rs | 1 - .../pre_ghostdag_validation.rs | 5 +- consensus/src/processes/difficulty.rs | 64 ++++++++++++++++++- consensus/src/processes/ghostdag/protocol.rs | 64 ++++++++++++++----- .../src/processes/pruning_proof/apply.rs | 2 +- .../src/processes/pruning_proof/build.rs | 23 +++---- .../src/processes/pruning_proof/validate.rs | 40 ++++++++---- math/src/uint.rs | 25 ++++++++ protocol/flows/src/v5/ibd/flow.rs | 13 ++-- 15 files changed, 223 insertions(+), 62 deletions(-) diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 365b8404c1..7c244b9148 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -17,7 +17,7 @@ use crate::{ tx::TxResult, }, header::Header, - pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, + pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, @@ -203,7 +203,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn validate_pruning_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + fn validate_pruning_proof(&self, proof: &PruningPointProof, proof_metadata: &PruningProofMetadata) -> PruningImportResult<()> { unimplemented!() } diff --git a/consensus/core/src/errors/pruning.rs b/consensus/core/src/errors/pruning.rs index d61437f876..a9686e023a 100644 --- a/consensus/core/src/errors/pruning.rs +++ b/consensus/core/src/errors/pruning.rs @@ -59,6 +59,9 @@ pub enum PruningImportError { #[error("process exit was initiated while validating pruning point proof")] PruningValidationInterrupted, + + #[error("block {0} at level {1} has invalid proof of work for level")] + ProofOfWorkFailed(Hash, BlockLevel), } pub type PruningImportResult = std::result::Result; diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index 188b2403b4..e4591f2181 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -41,6 +41,10 @@ pub mod utxo; /// overall blocks, so 2^192 is definitely a justified upper-bound. 
pub type BlueWorkType = kaspa_math::Uint192; +/// This extends directly from the expectation above about having no more than +/// 2^128 work in a single block +pub const MAX_WORK_LEVEL: BlockLevel = 128; + /// The type used to represent the GHOSTDAG K parameter pub type KType = u16; diff --git a/consensus/core/src/pruning.rs b/consensus/core/src/pruning.rs index d998903897..aa029b40e1 100644 --- a/consensus/core/src/pruning.rs +++ b/consensus/core/src/pruning.rs @@ -1,6 +1,7 @@ use crate::{ header::Header, trusted::{TrustedGhostdagData, TrustedHeader}, + BlueWorkType, }; use kaspa_hashes::Hash; use std::sync::Arc; @@ -19,3 +20,15 @@ pub struct PruningPointTrustedData { /// Union of GHOSTDAG data required to verify blocks in the future of the pruning point pub ghostdag_blocks: Vec<TrustedGhostdagData>, } + +#[derive(Clone, Copy)] +pub struct PruningProofMetadata { + /// The claimed work of the initial relay block (from the prover) + pub relay_block_blue_work: BlueWorkType, +} + +impl PruningProofMetadata { + pub fn new(relay_block_blue_work: BlueWorkType) -> Self { + Self { relay_block_blue_work } + } +} diff --git a/consensus/pow/src/lib.rs b/consensus/pow/src/lib.rs index c3fbdd867e..1eaa1a2ce1 100644 --- a/consensus/pow/src/lib.rs +++ b/consensus/pow/src/lib.rs @@ -54,12 +54,22 @@ impl State { } pub fn calc_block_level(header: &Header, max_block_level: BlockLevel) -> BlockLevel { + let (block_level, _) = calc_block_level_check_pow(header, max_block_level); + block_level +} + +pub fn calc_block_level_check_pow(header: &Header, max_block_level: BlockLevel) -> (BlockLevel, bool) { if header.parents_by_level.is_empty() { - return max_block_level; // Genesis has the max block level + return (max_block_level, true); // Genesis has the max block level } let state = State::new(header); - let (_, pow) = state.check_pow(header.nonce); + let (passed, pow) = state.check_pow(header.nonce); + let block_level = calc_level_from_pow(pow, max_block_level); + (block_level, passed) +} + +pub fn calc_level_from_pow(pow: Uint256, max_block_level: BlockLevel) -> BlockLevel { let signed_block_level = max_block_level as i64 - pow.bits() as i64; max(signed_block_level, 0) as BlockLevel } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index a47b4218fc..b3edd55ca4 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -64,7 +64,7 @@ use kaspa_consensus_core::{ merkle::calc_hash_merkle_root, muhash::MuHashExtensions, network::NetworkType, - pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, + pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher, @@ -757,8 +757,12 @@ impl ConsensusApi for Consensus { calc_hash_merkle_root(txs.iter(), storage_mass_activated) } - fn validate_pruning_proof(&self, proof: &PruningPointProof) -> Result<(), PruningImportError> { - self.services.pruning_proof_manager.validate_pruning_point_proof(proof) + fn validate_pruning_proof( + &self, + proof: &PruningPointProof, + proof_metadata: &PruningProofMetadata, + ) -> Result<(), PruningImportError> { + self.services.pruning_proof_manager.validate_pruning_point_proof(proof, proof_metadata) } fn apply_pruning_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { diff --git a/consensus/src/consensus/services.rs
b/consensus/src/consensus/services.rs index 608ae61b49..69eb936582 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -118,7 +118,6 @@ impl ConsensusServices { relations_services[0].clone(), storage.headers_store.clone(), reachability_service.clone(), - false, ); let coinbase_manager = CoinbaseManager::new( diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index 47094ed7f3..cce6411054 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -9,7 +9,7 @@ use kaspa_consensus_core::header::Header; use kaspa_consensus_core::BlockLevel; use kaspa_core::time::unix_now; use kaspa_database::prelude::StoreResultExtensions; -use std::cmp::max; +use kaspa_pow::calc_level_from_pow; impl HeaderProcessor { /// Validates the header in isolation including pow check against header declared bits. @@ -102,8 +102,7 @@ impl HeaderProcessor { let state = kaspa_pow::State::new(header); let (passed, pow) = state.check_pow(header.nonce); if passed || self.skip_proof_of_work { - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - Ok(max(signed_block_level, 0) as BlockLevel) + Ok(calc_level_from_pow(pow, self.max_block_level)) } else { Err(RuleError::InvalidPoW) } diff --git a/consensus/src/processes/difficulty.rs b/consensus/src/processes/difficulty.rs index f02efdb551..a27da68a25 100644 --- a/consensus/src/processes/difficulty.rs +++ b/consensus/src/processes/difficulty.rs @@ -6,7 +6,7 @@ use crate::model::stores::{ use kaspa_consensus_core::{ config::params::MIN_DIFFICULTY_WINDOW_LEN, errors::difficulty::{DifficultyError, DifficultyResult}, - BlockHashSet, BlueWorkType, + BlockHashSet, BlueWorkType, MAX_WORK_LEVEL, }; use kaspa_math::{Uint256, Uint320}; use std::{ @@ -282,6 +282,16 @@ pub fn calc_work(bits: u32) -> BlueWorkType { res.try_into().expect("Work should not exceed 2**192") } +pub fn level_work(level: u8, max_block_level: u8) -> BlueWorkType { + // Need to make a special condition for level 0 to ensure true work is always used + if level == 0 { + return 0.into(); + } + // We use 256 here so the result corresponds to the work at the level from calc_level_from_pow + let exp = (level as u32) + 256 - (max_block_level as u32); + BlueWorkType::from_u64(1) << exp.min(MAX_WORK_LEVEL as u32) +} + #[derive(Eq)] struct DifficultyBlock { timestamp: u64, @@ -307,3 +317,55 @@ impl Ord for DifficultyBlock { self.timestamp.cmp(&other.timestamp).then_with(|| self.sortable_block.cmp(&other.sortable_block)) } } + +#[cfg(test)] +mod tests { + use kaspa_consensus_core::{BlockLevel, BlueWorkType, MAX_WORK_LEVEL}; + use kaspa_math::{Uint256, Uint320}; + use kaspa_pow::calc_level_from_pow; + + use crate::processes::difficulty::{calc_work, level_work}; + use kaspa_utils::hex::ToHex; + + #[test] + fn test_target_levels() { + let max_block_level: BlockLevel = 225; + for level in 1..=max_block_level { + // required pow for level + let level_target = (Uint320::from_u64(1) << (max_block_level - level).max(MAX_WORK_LEVEL) as u32) - Uint320::from_u64(1); + let level_target = Uint256::from_be_bytes(level_target.to_be_bytes()[8..40].try_into().unwrap()); + let calculated_level = calc_level_from_pow(level_target, max_block_level); + + let true_level_work = calc_work(level_target.compact_target_bits()); + let calc_level_work = level_work(level, max_block_level); + + // A "good 
enough" estimate of level work is within 1% diff from work with actual level target + // It's hard to calculate percentages with these large numbers, so to get around using floats + // we multiply the difference by 100. if the result is <= the calc_level_work it means + // difference must have been less than 1% + let (percent_diff, overflowed) = (true_level_work - calc_level_work).overflowing_mul(BlueWorkType::from_u64(100)); + let is_good_enough = percent_diff <= calc_level_work; + + println!("Level {}:", level); + println!( + " data | {} | {} | {} / {} |", + level_target.compact_target_bits(), + level_target.bits(), + calculated_level, + max_block_level + ); + println!(" pow | {}", level_target.to_hex()); + println!(" work | 0000000000000000{}", true_level_work.to_hex()); + println!(" lvwork | 0000000000000000{}", calc_level_work.to_hex()); + println!(" diff<1% | {}", !overflowed && (is_good_enough)); + + assert!(is_good_enough); + } + } + + #[test] + fn test_base_level_work() { + // Expect that at level 0, the level work is always 0 + assert_eq!(BlueWorkType::from(0), level_work(0, 255)); + } +} diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 997c4eecb5..1032868ee0 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use kaspa_consensus_core::{ blockhash::{self, BlockHashExtensions, BlockHashes}, - BlockHashMap, BlueWorkType, HashMapCustomHasher, + BlockHashMap, BlockLevel, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; use kaspa_utils::refs::Refs; @@ -16,7 +16,7 @@ use crate::{ relations::RelationsStoreReader, }, }, - processes::difficulty::calc_work, + processes::difficulty::{calc_work, level_work}, }; use super::ordering::*; @@ -29,7 +29,15 @@ pub struct GhostdagManager, pub(super) reachability_service: U, - use_score_as_work: bool, + + /// Level work is a lower-bound for the amount of work represented by each block. + /// When running GD for higher-level sub-DAGs, this value should be set accordingly + /// to the work represented by that level, and then used as a lower bound + /// for the work calculated from header bits (which depends on current difficulty). + /// For instance, assuming level 80 (i.e., pow hash has at least 80 zeros) is always + /// above the difficulty target, all blocks in it should represent the same amount of + /// work regardless of whether current difficulty requires 20 zeros or 25 zeros. 
+ level_work: BlueWorkType, } impl GhostdagManager { @@ -40,9 +48,30 @@ impl, reachability_service: U, - use_score_as_work: bool, ) -> Self { - Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work } + // For ordinary GD, always keep level_work=0 so the lower bound is ineffective + Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, level_work: 0.into() } + } + + pub fn with_level( + genesis_hash: Hash, + k: KType, + ghostdag_store: Arc, + relations_store: S, + headers_store: Arc, + reachability_service: U, + level: BlockLevel, + max_block_level: BlockLevel, + ) -> Self { + Self { + genesis_hash, + k, + ghostdag_store, + relations_store, + reachability_service, + headers_store, + level_work: level_work(level, max_block_level), + } } pub fn genesis_ghostdag_data(&self) -> GhostdagData { @@ -115,20 +144,21 @@ impl = Default::default(); let mut visited = BlockHashSet::new(); for child in relations_service.get_children(root).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, - })); + topological_heap + .push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work })); } let mut has_required_block = required_block.is_some_and(|required_block| root == required_block); @@ -378,11 +376,8 @@ impl PruningProofManager { ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, - })); + topological_heap + .push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work })); } } diff --git a/consensus/src/processes/pruning_proof/validate.rs b/consensus/src/processes/pruning_proof/validate.rs index 63650cdc53..3262b65901 100644 --- a/consensus/src/processes/pruning_proof/validate.rs +++ b/consensus/src/processes/pruning_proof/validate.rs @@ -8,14 +8,13 @@ use kaspa_consensus_core::{ blockhash::{BlockHashExtensions, BlockHashes, ORIGIN}, errors::pruning::{PruningImportError, PruningImportResult}, header::Header, - pruning::PruningPointProof, + pruning::{PruningPointProof, PruningProofMetadata}, BlockLevel, }; use kaspa_core::info; use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; use kaspa_hashes::Hash; -use kaspa_math::int::SignedInteger; -use kaspa_pow::calc_block_level; +use kaspa_pow::{calc_block_level, calc_block_level_check_pow}; use kaspa_utils::vec::VecExtensions; use parking_lot::lock_api::RwLock; use rocksdb::WriteBatch; @@ -26,6 +25,7 @@ use crate::{ stores::{ ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, + headers_selected_tip::HeadersSelectedTipStoreReader, pruning::PruningStoreReader, reachability::{DbReachabilityStore, ReachabilityStoreReader}, relations::{DbRelationsStore, RelationsStoreReader}, @@ -37,7 +37,11 @@ use crate::{ use super::{PruningProofManager, 
TempProofContext}; impl PruningProofManager { - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + pub fn validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + proof_metadata: &PruningProofMetadata, + ) -> PruningImportResult<()> { if proof.len() != self.max_block_level as usize + 1 { return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); } @@ -74,6 +78,13 @@ impl PruningProofManager { let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); + // The accumulated blue work of current consensus from the pruning point onward + let pruning_period_work = + self.headers_selected_tip_store.read().get().unwrap().blue_work.saturating_sub(current_pp_header.blue_work); + // The claimed blue work of the prover from his pruning point and up to the triggering relay block. This work + // will eventually be verified if the proof is accepted so we can treat it as trusted + let prover_claimed_pruning_period_work = proof_metadata.relay_block_blue_work.saturating_sub(proof_pp_header.blue_work); + for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; @@ -90,7 +101,6 @@ impl PruningProofManager { // we can determine if the proof is better. The proof is better if the blue work* difference between the // old current consensus's tips and the common ancestor is less than the blue work difference between the // proof's tip and the common ancestor. - // *Note: blue work is the same as blue score on levels higher than 0 if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( &proof_ghostdag_stores, ¤t_consensus_ghostdag_stores, @@ -98,13 +108,13 @@ impl PruningProofManager { level, proof_selected_tip_gd, ) { - let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); + let proof_level_blue_work_diff = proof_selected_tip_gd.blue_work.saturating_sub(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = - SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); - if parent_blue_work_diff >= selected_tip_blue_work_diff { + let parent_blue_work_diff = parent_blue_work.saturating_sub(common_ancestor_gd.blue_work); + if parent_blue_work_diff.saturating_add(pruning_period_work) + >= proof_level_blue_work_diff.saturating_add(prover_claimed_pruning_period_work) + { return Err(PruningImportError::PruningProofInsufficientBlueWork); } } @@ -187,14 +197,15 @@ impl PruningProofManager { .cloned() .enumerate() .map(|(level, ghostdag_store)| { - GhostdagManager::new( + GhostdagManager::with_level( self.genesis_hash, self.ghostdag_k, ghostdag_store, relations_stores[level].clone(), headers_store.clone(), reachability_services[level].clone(), - level != 0, + level as BlockLevel, + self.max_block_level, ) }) .collect_vec(); @@ -242,10 +253,13 @@ impl PruningProofManager { let level_idx = level as usize; let mut selected_tip = None; for (i, header) in proof[level as usize].iter().enumerate() { - let 
header_level = calc_block_level(header, self.max_block_level); + let (header_level, pow_passes) = calc_block_level_check_pow(header, self.max_block_level); if header_level < level { return Err(PruningImportError::PruningProofWrongBlockLevel(header.hash, header_level, level)); } + if !pow_passes { + return Err(PruningImportError::ProofOfWorkFailed(header.hash, level)); + } headers_store.insert(header.hash, header.clone(), header_level).unwrap_or_exists(); diff --git a/math/src/uint.rs b/math/src/uint.rs index 4ecc1fe122..095d595597 100644 --- a/math/src/uint.rs +++ b/math/src/uint.rs @@ -158,6 +158,18 @@ macro_rules! construct_uint { (self, carry) } + #[inline] + pub fn saturating_sub(self, other: Self) -> Self { + let (sum, carry) = self.overflowing_sub(other); + if carry { Self::ZERO } else { sum } + } + + #[inline] + pub fn saturating_add(self, other: Self) -> Self { + let (sum, carry) = self.overflowing_add(other); + if carry { Self::MAX } else { sum } + } + /// Multiplication by u64 #[inline] pub fn overflowing_mul_u64(self, other: u64) -> (Self, bool) { @@ -1150,6 +1162,19 @@ mod tests { } } + #[test] + fn test_saturating_ops() { + let u1 = Uint128::from_u128(u128::MAX); + let u2 = Uint128::from_u64(u64::MAX); + // Sub + assert_eq!(u1.saturating_sub(u2), Uint128::from_u128(u128::MAX - u64::MAX as u128)); + assert_eq!(u1.saturating_sub(u2).as_u128(), u128::MAX - u64::MAX as u128); + assert_eq!(u2.saturating_sub(u1), Uint128::ZERO); + // Add + assert_eq!(u1.saturating_add(Uint128::from_u64(1)), Uint128::MAX); + assert_eq!(u2.saturating_add(Uint128::from_u64(1)), Uint128::from_u128(u64::MAX as u128 + 1)); + } + #[test] fn test_mod_inv() { use core::cmp::Ordering; diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs index 1e38deddd1..0dd7fe64f1 100644 --- a/protocol/flows/src/v5/ibd/flow.rs +++ b/protocol/flows/src/v5/ibd/flow.rs @@ -10,7 +10,7 @@ use kaspa_consensus_core::{ api::BlockValidationFuture, block::Block, header::Header, - pruning::{PruningPointProof, PruningPointsList}, + pruning::{PruningPointProof, PruningPointsList, PruningProofMetadata}, BlockHashSet, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy, StagingConsensus}; @@ -218,7 +218,7 @@ impl IbdFlow { let staging_session = staging.session().await; - let pruning_point = self.sync_and_validate_pruning_proof(&staging_session).await?; + let pruning_point = self.sync_and_validate_pruning_proof(&staging_session, relay_block).await?; self.sync_headers(&staging_session, syncer_virtual_selected_parent, pruning_point, relay_block).await?; staging_session.async_validate_pruning_points().await?; self.validate_staging_timestamps(&self.ctx.consensus().session().await, &staging_session).await?; @@ -226,7 +226,7 @@ impl IbdFlow { Ok(()) } - async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy) -> Result { + async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy, relay_block: &Block) -> Result { self.router.enqueue(make_message!(Payload::RequestPruningPointProof, RequestPruningPointProofMessage {})).await?; // Pruning proof generation and communication might take several minutes, so we allow a long 10 minute timeout @@ -234,11 +234,14 @@ impl IbdFlow { let proof: PruningPointProof = msg.try_into()?; debug!("received proof with overall {} headers", proof.iter().map(|l| l.len()).sum::()); + let proof_metadata = PruningProofMetadata::new(relay_block.header.blue_work); + // Get a new session for current consensus (non staging) let consensus = 
self.ctx.consensus().session().await; // The proof is validated in the context of current consensus - let proof = consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof).map(|()| proof)).await?; + let proof = + consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof, &proof_metadata).map(|()| proof)).await?; let proof_pruning_point = proof[0].last().expect("was just ensured by validation").hash; @@ -316,7 +319,7 @@ impl IbdFlow { if mismatch_detected { info!("Validating the locally built proof (sanity test fallback #2)"); // Note: the proof is validated in the context of *current* consensus - if let Err(err) = con.validate_pruning_proof(&built_proof) { + if let Err(err) = con.validate_pruning_proof(&built_proof, &proof_metadata) { panic!("Locally built proof failed validation: {}", err); } info!("Locally built proof was validated successfully"); From 8b3ed0708a0b5ab2f10b921397c50d6524d4fd61 Mon Sep 17 00:00:00 2001 From: Maxim <59533214+biryukovmaxim@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:10:08 +0400 Subject: [PATCH 23/31] Add KIP-10 Transaction Introspection Opcodes, 8-byte arithmetic and Hard Fork Support (#487) * implement new opcodes * example of mutual tx * add docs describing scenario * introduce feature gate for new features * introduce hf feature that enables txscript hf feature * style: fmt and clippy fix * implement new opcodes * example of mutual tx * add docs describing scenario * introduce feature gate for new features * style: fmt and clippy fix * remove unused feature * fmt * make opcode invalid in case of feature disabled * feature gate test * change test set based on feature add ci cd test * rename InputSPK -> InputSpk * enable kip10 opcodes based on daa_score in runtime * use dummy kip10 activation daa score in params * use dummy kip10 activation daa score in params * suppress clippy lint * add example with shared key * fix clippy * remove useless check from example * add one-time borrowing example * Implement one-time and two-times threshold borrowing scenarios - Add threshold_scenario_limited_one_time function - Add threshold_scenario_limited_2_times function - Create generate_limited_time_script for reusable script generation - Implement nested script structure for two-times borrowing - Update documentation for both scenarios - Add tests for owner spending, borrowing, and invalid attempts in both cases - Ensure consistent error handling and logging across scenarios - Refactor to use more generic script generation approach * fix: fix incorrect sig-op count * correct error description * style: fmt * pass kip-10 flag in constructor params * remove borrow scenario from tests. run tests against both kip1- enabled/disabled engine * introduce method that converts spk to bytes. add tests covering new opcodes * return comment describing where invalid opcodes starts from. add comments describing why 2 files are used. * fix wring error messages * support introspection by index * test input spk * test output spk * tests refactor * support 8-byte arithmetics * Standartize fork activation logic (#588) * Use ForkActivation for all fork activations * Avoid using negation in some ifs * Add is_within_range_from_activation * Move 'is always' check inside is_within_range_from_activation * lints * Refactoring for cleaner pruning proof module (#589) * Cleanup manual block level calc There were two areas in pruning proof mod that manually calculated block level. 
This replaces those with a call to calc_block_level * Refactor pruning proof build functions * Refactor apply pruning proof functions * Refactor validate pruning functions * Add comments for clarity * only enable 8 byte arithmetics for kip10 * use i64 value in 9-byte tests * fix tests covering kip10 and i64 deserialization * fix test according to 8-byte math * finish test covering kip10 opcodes: input/output/amount/spk * fix kip10 examples * rename test * feat: add input index op * feat: add input/outpiut opcodes * reseve opcodes reorder kip10 opcodes. reflect script tests * fix example * introspection opcodes are reserverd, not disables * use ForkActivation type * cicd: run kip-10 example * move spk encoding to txscript module * rework bound check ot input/output index * fix tests by importing spkencoding trait * replace todo in descripotions of over[under]flow errors * reorder new opcodes, reserve script sig opcode, remove txid * fix bitcoin script tests * add simple opcode tests * rename id(which represents input index) to idx * fix comments * add input spk tests * refactor test cases * refactor(txscript): Enforce input index invariant via assertion Change TxScriptEngine::from_transaction_input to assert valid input index instead of returning Result. This better reflects that an invalid index is a caller's (transaction validation) error rather than a script engine error, since the input must be part of the transaction being validated. An invalid index signifies a mismatch between the transaction and the input being validated - this is a programming error in the transaction validator layer, not a script engine concern. The script engine should be able to assume it receives valid inputs from its caller. The change simplifies error handling by enforcing this invariant early, while maintaining identical behavior for valid inputs. The function is now documented to panic on malformed inputs. This is a breaking change for code that previously handled InvalidIndex errors, though such handling was likely incorrect as it indicated an inconsistency in transaction validation. 
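To make the bullet above concrete, the assert-the-invariant pattern it describes looks roughly like this (a standalone sketch; the Engine type and its fields are illustrative, not the actual kaspa-txscript API):

    struct Tx {
        inputs: Vec<u64>, // stand-in for real transaction inputs
    }

    struct Engine<'a> {
        tx: &'a Tx,
        idx: usize,
    }

    impl<'a> Engine<'a> {
        /// Panics if `idx` is not a valid input index of `tx`, reflecting that a
        /// bad index is a bug in the calling validation layer, not a script error.
        fn from_transaction_input(tx: &'a Tx, idx: usize) -> Self {
            assert!(idx < tx.inputs.len(), "input index {idx} out of bounds");
            Self { tx, idx }
        }

        fn current_input(&self) -> u64 {
            self.tx.inputs[self.idx]
        }
    }

    fn main() {
        let tx = Tx { inputs: vec![7, 9] };
        let engine = Engine::from_transaction_input(&tx, 1); // valid index: ok
        assert_eq!(engine.current_input(), 9);
        // Engine::from_transaction_input(&tx, 5) would panic by design.
    }

The design trade-off is exactly as the commit message states: callers lose the ability to handle an InvalidIndex error, but any such handling was masking an inconsistency upstream.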
* refactor error types to contain correct info * rename id to idx * rename opcode * make construction of TxScriptEngine from transaction input infallible * style: format combinators chain * add integration test covering activation of kip10 * rename kip10_activation_daa_score to kip10_activation * Update crypto/txscript/src/lib.rs refactor vector filling * rework assert * verify that block is disqualified in case of it has tx which requires block that contains the tx with kip10 opcode is accepted after daa score has being reached * revert changer to infallible api * add doc comments * Update crypto/txscript/src/opcodes/mod.rs Fallible conversion of output amount (usize -> i64) * Update crypto/txscript/src/opcodes/mod.rs Fallible conversion of input amount (usize -> i64) * add required import * refactor: SigHashReusedValuesUnsync doesnt neet to be mutable * fix test description * rework example * 9 byte integers must fail to serialize * add todo * rewrite todo * remove redundant code * remove redundant mut in example * remove redundant mut in example * remove redundant mut in example * cicd: apply lint to examples --------- Co-authored-by: Ori Newman --- .github/workflows/ci.yaml | 5 +- consensus/benches/check_scripts.rs | 7 +- consensus/client/src/signing.rs | 2 +- consensus/core/src/config/params.rs | 18 +- consensus/core/src/tx.rs | 14 + consensus/src/consensus/services.rs | 1 + consensus/src/consensus/test_consensus.rs | 26 +- .../processes/transaction_validator/mod.rs | 6 + .../transaction_validator_populated.rs | 72 +- crypto/txscript/Cargo.toml | 3 + crypto/txscript/errors/src/lib.rs | 14 +- crypto/txscript/examples/kip-10.rs | 663 ++ crypto/txscript/src/data_stack.rs | 237 +- crypto/txscript/src/lib.rs | 162 +- crypto/txscript/src/opcodes/macros.rs | 6 +- crypto/txscript/src/opcodes/mod.rs | 1193 +++- crypto/txscript/src/script_builder.rs | 27 +- crypto/txscript/src/standard/multisig.rs | 2 +- .../test-data/script_tests-kip10.json | 5397 +++++++++++++++++ .../src/consensus_integration_tests.rs | 114 +- wallet/pskt/src/pskt.rs | 2 +- 21 files changed, 7700 insertions(+), 271 deletions(-) create mode 100644 crypto/txscript/examples/kip-10.rs create mode 100644 crypto/txscript/test-data/script_tests-kip10.json diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 49fe4e4637..c9aa2a2673 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -128,6 +128,9 @@ jobs: - name: Run cargo doc run: cargo doc --release --no-deps --workspace + - name: Run kip-10 example + run: cargo run --example kip-10 + # test-release: # name: Test Suite Release @@ -210,7 +213,7 @@ jobs: run: cargo fmt --all -- --check - name: Run cargo clippy - run: cargo clippy --workspace --tests --benches -- -D warnings + run: cargo clippy --workspace --tests --benches --examples -- -D warnings check-wasm32: diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs index 4a596da1b2..a451eec650 100644 --- a/consensus/benches/check_scripts.rs +++ b/consensus/benches/check_scripts.rs @@ -84,7 +84,7 @@ fn benchmark_check_scripts(c: &mut Criterion) { let cache = Cache::new(inputs_count as u64); b.iter(|| { cache.clear(); - check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); + check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable()), false).unwrap(); }) }); @@ -93,7 +93,7 @@ fn benchmark_check_scripts(c: &mut Criterion) { let cache = Cache::new(inputs_count as u64); b.iter(|| { cache.clear(); - 
check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false).unwrap(); }) }); @@ -107,7 +107,8 @@ fn benchmark_check_scripts(c: &mut Criterion) { let cache = Cache::new(inputs_count as u64); b.iter(|| { cache.clear(); - check_scripts_par_iter_pool(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool)).unwrap(); + check_scripts_par_iter_pool(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool), false) + .unwrap(); }) }); } diff --git a/consensus/client/src/signing.rs b/consensus/client/src/signing.rs index f7fe8cee6a..cd86046770 100644 --- a/consensus/client/src/signing.rs +++ b/consensus/client/src/signing.rs @@ -178,7 +178,7 @@ pub fn calc_schnorr_signature_hash( let utxo = cctx::UtxoEntry::from(utxo.as_ref()); let hash_type = SIG_HASH_ALL; - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); // let input = verifiable_tx.populated_input(input_index); // let tx = verifiable_tx.tx(); diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index 9c4a500e52..8cab11c92d 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -110,6 +110,18 @@ pub struct Params { /// DAA score from which storage mass calculation and transaction mass field are activated as a consensus rule pub storage_mass_activation: ForkActivation, + /// DAA score from which tx engine: + /// 1. Supports 8-byte integer arithmetic operations (previously limited to 4 bytes) + /// 2. Supports transaction introspection opcodes: + /// - OpTxInputCount (0xb3): Get number of inputs + /// - OpTxOutputCount (0xb4): Get number of outputs + /// - OpTxInputIndex (0xb9): Get current input index + /// - OpTxInputAmount (0xbe): Get input amount + /// - OpTxInputSpk (0xbf): Get input script public key + /// - OpTxOutputAmount (0xc2): Get output amount + /// - OpTxOutputSpk (0xc3): Get output script public key + pub kip10_activation: ForkActivation, + /// DAA score after which the pre-deflationary period switches to the deflationary period pub deflationary_phase_daa_score: u64, @@ -380,6 +392,7 @@ pub const MAINNET_PARAMS: Params = Params { storage_mass_parameter: STORAGE_MASS_PARAMETER, storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: @@ -443,7 +456,7 @@ pub const TESTNET_PARAMS: Params = Params { storage_mass_parameter: STORAGE_MASS_PARAMETER, storage_mass_activation: ForkActivation::never(), - + kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. 
This number is calculated as follows: // We define a year as 365.25 days @@ -513,6 +526,7 @@ pub const TESTNET11_PARAMS: Params = Params { storage_mass_parameter: STORAGE_MASS_PARAMETER, storage_mass_activation: ForkActivation::always(), + kip10_activation: ForkActivation::never(), skip_proof_of_work: false, max_block_level: 250, @@ -566,6 +580,7 @@ pub const SIMNET_PARAMS: Params = Params { storage_mass_parameter: STORAGE_MASS_PARAMETER, storage_mass_activation: ForkActivation::always(), + kip10_activation: ForkActivation::never(), skip_proof_of_work: true, // For simnet only, PoW can be simulated by default max_block_level: 250, @@ -612,6 +627,7 @@ pub const DEVNET_PARAMS: Params = Params { storage_mass_parameter: STORAGE_MASS_PARAMETER, storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index a4dd7dd45b..9f02ade4b6 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -293,6 +293,8 @@ pub trait VerifiableTransaction { fn id(&self) -> TransactionId { self.tx().id() } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry>; } /// A custom iterator written only so that `populated_inputs` has a known return type and can de defined on the trait level @@ -342,6 +344,10 @@ impl<'a> VerifiableTransaction for PopulatedTransaction<'a> { fn populated_input(&self, index: usize) -> (&TransactionInput, &UtxoEntry) { (&self.tx.inputs[index], &self.entries[index]) } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry> { + self.entries.get(index) + } } /// Represents a validated transaction with populated UTXO entry data and a calculated fee @@ -370,6 +376,10 @@ impl<'a> VerifiableTransaction for ValidatedTransaction<'a> { fn populated_input(&self, index: usize) -> (&TransactionInput, &UtxoEntry) { (&self.tx.inputs[index], &self.entries[index]) } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry> { + self.entries.get(index) + } } impl AsRef for Transaction { @@ -507,6 +517,10 @@ impl> VerifiableTransaction for MutableTransactionVerifiab self.inner.entries[index].as_ref().expect("expected to be called only following full UTXO population"), ) } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry> { + self.inner.entries.get(index).and_then(Option::as_ref) + } } /// Specialized impl for `T=Arc` diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 69eb936582..16247db18b 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -146,6 +146,7 @@ impl ConsensusServices { tx_script_cache_counters, mass_calculator.clone(), params.storage_mass_activation, + params.kip10_activation, ); let pruning_point_manager = PruningPointManager::new( diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index 472bdbd835..87790d093f 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -13,11 +13,8 @@ use kaspa_hashes::Hash; use kaspa_notify::subscription::context::SubscriptionContext; use parking_lot::RwLock; -use kaspa_database::create_temp_db; -use kaspa_database::prelude::ConnBuilder; -use std::future::Future; -use std::{sync::Arc, thread::JoinHandle}; - +use super::services::{DbDagTraversalManager, DbGhostdagManager, DbWindowManager}; +use super::Consensus; 
use crate::pipeline::virtual_processor::test_block_builder::TestBlockBuilder; use crate::processes::window::WindowManager; use crate::{ @@ -35,9 +32,10 @@ use crate::{ pipeline::{body_processor::BlockBodyProcessor, virtual_processor::VirtualStateProcessor, ProcessingCounters}, test_helpers::header_from_precomputed_hash, }; - -use super::services::{DbDagTraversalManager, DbGhostdagManager, DbWindowManager}; -use super::Consensus; +use kaspa_database::create_temp_db; +use kaspa_database::prelude::ConnBuilder; +use std::future::Future; +use std::{sync::Arc, thread::JoinHandle}; pub struct TestConsensus { params: Params, @@ -138,6 +136,12 @@ impl TestConsensus { self.validate_and_insert_block(self.build_block_with_parents(hash, parents).to_immutable()).virtual_state_task } + /// Adds a valid block with the given transactions and parents to the consensus. + /// + /// # Panics + /// + /// Panics if block builder validation rules are violated. + /// See `kaspa_consensus_core::errors::block::RuleError` for the complete list of possible validation rules. pub fn add_utxo_valid_block_with_parents( &self, hash: Hash, @@ -149,6 +153,12 @@ impl TestConsensus { .virtual_state_task } + /// Builds a valid block with the given transactions, parents, and miner data. + /// + /// # Panics + /// + /// Panics if block builder validation rules are violated. + /// See `kaspa_consensus_core::errors::block::RuleError` for the complete list of possible validation rules. pub fn build_utxo_valid_block_with_parents( &self, hash: Hash, diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index f9d9f79c89..7d007a3350 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -28,9 +28,12 @@ pub struct TransactionValidator { /// Storage mass hardfork DAA score storage_mass_activation: ForkActivation, + /// KIP-10 hardfork DAA score + kip10_activation: ForkActivation, } impl TransactionValidator { + #[allow(clippy::too_many_arguments)] pub fn new( max_tx_inputs: usize, max_tx_outputs: usize, @@ -42,6 +45,7 @@ impl TransactionValidator { counters: Arc, mass_calculator: MassCalculator, storage_mass_activation: ForkActivation, + kip10_activation: ForkActivation, ) -> Self { Self { max_tx_inputs, @@ -54,6 +58,7 @@ impl TransactionValidator { sig_cache: Cache::with_counters(10_000, counters), mass_calculator, storage_mass_activation, + kip10_activation, } } @@ -78,6 +83,7 @@ impl TransactionValidator { sig_cache: Cache::with_counters(10_000, counters), mass_calculator: MassCalculator::new(0, 0, 0, 0), storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), } } } diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index bbb74f0ae1..cff13d9fbd 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -61,7 +61,7 @@ impl TransactionValidator { match flags { TxValidationFlags::Full | TxValidationFlags::SkipMassCheck => { Self::check_sig_op_counts(tx)?; - self.check_scripts(tx)?; + self.check_scripts(tx, pov_daa_score)?; } TxValidationFlags::SkipScriptChecks => {} } @@ -172,35 +172,47 @@ impl TransactionValidator { Ok(()) } - pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { - 
check_scripts(&self.sig_cache, tx) + pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync), pov_daa_score: u64) -> TxResult<()> { + check_scripts(&self.sig_cache, tx, self.kip10_activation.is_active(pov_daa_score)) } } -pub fn check_scripts(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { +pub fn check_scripts( + sig_cache: &Cache, + tx: &(impl VerifiableTransaction + Sync), + kip10_enabled: bool, +) -> TxResult<()> { if tx.inputs().len() > CHECK_SCRIPTS_PARALLELISM_THRESHOLD { - check_scripts_par_iter(sig_cache, tx) + check_scripts_par_iter(sig_cache, tx, kip10_enabled) } else { - check_scripts_sequential(sig_cache, tx) + check_scripts_sequential(sig_cache, tx, kip10_enabled) } } -pub fn check_scripts_sequential(sig_cache: &Cache, tx: &impl VerifiableTransaction) -> TxResult<()> { +pub fn check_scripts_sequential( + sig_cache: &Cache, + tx: &impl VerifiableTransaction, + kip10_enabled: bool, +) -> TxResult<()> { let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { - TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache) - .and_then(|mut e| e.execute()) + TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache, kip10_enabled) + .execute() .map_err(|err| map_script_err(err, input))?; } Ok(()) } -pub fn check_scripts_par_iter(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { +pub fn check_scripts_par_iter( + sig_cache: &Cache, + tx: &(impl VerifiableTransaction + Sync), + kip10_enabled: bool, +) -> TxResult<()> { let reused_values = SigHashReusedValuesSync::new(); (0..tx.inputs().len()).into_par_iter().try_for_each(|idx| { let (input, utxo) = tx.populated_input(idx); - TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache) - .and_then(|mut e| e.execute()) + TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache, kip10_enabled) + .execute() .map_err(|err| map_script_err(err, input)) }) } @@ -209,8 +221,9 @@ pub fn check_scripts_par_iter_pool( sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync), pool: &ThreadPool, + kip10_enabled: bool, ) -> TxResult<()> { - pool.install(|| check_scripts_par_iter(sig_cache, tx)) + pool.install(|| check_scripts_par_iter(sig_cache, tx, kip10_enabled)) } fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRuleError { @@ -305,13 +318,13 @@ mod tests { }], ); - tv.check_scripts(&populated_tx).expect("Signature check failed"); + tv.check_scripts(&populated_tx, u64::MAX).expect("Signature check failed"); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); // Duplicated sigs should fail due to wrong sighash assert_eq!( - tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)), + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)) ); } @@ -375,11 +388,11 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx).is_err(), "Expecting signature check to fail"); + assert!(tv.check_scripts(&populated_tx, u64::MAX).is_err(), "Expecting signature check to fail"); // Test a tx with 2 inputs to cover parallelism split points in inner script checking code let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); - tv.check_scripts(&PopulatedTransaction::new(&tx2, 
+        tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX).expect_err("Expecting signature check to fail");
 
         // Verify we are correctly testing the parallelism case (applied here as sanity for all tests)
         assert!(
@@ -448,13 +461,13 @@ mod tests {
                 is_coinbase: false,
             }],
         );
-        tv.check_scripts(&populated_tx).expect("Signature check failed");
+        tv.check_scripts(&populated_tx, u64::MAX).expect("Signature check failed");
 
         // Test a tx with 2 inputs to cover parallelism split points in inner script checking code
         let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries);
         // Duplicated sigs should fail due to wrong sighash
         assert_eq!(
-            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)),
+            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX),
             Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))
         );
     }
@@ -519,12 +532,12 @@ mod tests {
             }],
         );
 
-        assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)));
+        assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)));
 
         // Test a tx with 2 inputs to cover parallelism split points in inner script checking code
         let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries);
         assert_eq!(
-            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)),
+            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX),
             Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))
         );
     }
@@ -589,12 +602,12 @@ mod tests {
             }],
         );
 
-        assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)));
+        assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)));
 
         // Test a tx with 2 inputs to cover parallelism split points in inner script checking code
         let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries);
         assert_eq!(
-            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)),
+            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX),
             Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))
         );
     }
@@ -659,12 +672,12 @@ mod tests {
             }],
         );
 
-        assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)));
+        assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)));
 
         // Test a tx with 2 inputs to cover parallelism split points in inner script checking code
         let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries);
         assert_eq!(
-            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)),
+            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX),
             Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse))
         );
     }
@@ -720,12 +733,15 @@ mod tests {
             }],
         );
 
-        assert_eq!(tv.check_scripts(&populated_tx), Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly)));
+        assert_eq!(
+            tv.check_scripts(&populated_tx, u64::MAX),
+            Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly))
+        );
 
         // Test a tx with 2 inputs to cover parallelism split points in inner script checking code
         let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries);
         assert_eq!(
-            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2)),
+            tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX),
             Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly))
         );
     }
@@ -806,7 +822,7 @@ mod tests {
         let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, &secret_key.secret_bytes()).unwrap();
         let signed_tx = sign(MutableTransaction::with_entries(unsigned_tx, entries), schnorr_key);
         let populated_tx = signed_tx.as_verifiable();
-        assert_eq!(tv.check_scripts(&populated_tx), Ok(()));
+        assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Ok(()));
         assert_eq!(TransactionValidator::check_sig_op_counts(&populated_tx), Ok(()));
     }
 }
diff --git a/crypto/txscript/Cargo.toml b/crypto/txscript/Cargo.toml
index e2f492ad38..46e3103e8f 100644
--- a/crypto/txscript/Cargo.toml
+++ b/crypto/txscript/Cargo.toml
@@ -9,6 +9,9 @@ include.workspace = true
 license.workspace = true
 repository.workspace = true
 
+[[example]]
+name = "kip-10"
+
 [features]
 wasm32-core = []
 wasm32-sdk = []
diff --git a/crypto/txscript/errors/src/lib.rs b/crypto/txscript/errors/src/lib.rs
index 4c077dae35..b16ec4cead 100644
--- a/crypto/txscript/errors/src/lib.rs
+++ b/crypto/txscript/errors/src/lib.rs
@@ -6,8 +6,8 @@ pub enum TxScriptError {
     MalformedPushSize(Vec<u8>),
     #[error("opcode requires {0} bytes, but script only has {1} remaining")]
     MalformedPush(usize, usize),
-    #[error("transaction input index {0} >= {1}")]
-    InvalidIndex(usize, usize),
+    #[error("transaction input {0} is out of bounds, should be non-negative below {1}")]
+    InvalidInputIndex(i32, usize),
     #[error("combined stack size {0} > max allowed {1}")]
     StackSizeExceeded(usize, usize),
     #[error("attempt to execute invalid opcode {0}")]
@@ -69,4 +69,14 @@ pub enum TxScriptError {
     InvalidStackOperation(usize, usize),
     #[error("script of size {0} exceeded maximum allowed size of {1}")]
     ScriptSize(usize, usize),
+    #[error("transaction output {0} is out of bounds, should be non-negative below {1}")]
+    InvalidOutputIndex(i32, usize),
+    #[error(transparent)]
+    Serialization(#[from] SerializationError),
+}
+
+#[derive(Error, PartialEq, Eq, Debug, Clone, Copy)]
+pub enum SerializationError {
+    #[error("Number exceeds 8 bytes: {0}")]
+    NumberTooLong(i64),
 }
diff --git a/crypto/txscript/examples/kip-10.rs b/crypto/txscript/examples/kip-10.rs
new file mode 100644
index 0000000000..4077385a72
--- /dev/null
+++ b/crypto/txscript/examples/kip-10.rs
@@ -0,0 +1,663 @@
+use kaspa_addresses::{Address, Prefix, Version};
+use kaspa_consensus_core::{
+    hashing::{
+        sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync},
+        sighash_type::SIG_HASH_ALL,
+    },
+    tx::{
+        MutableTransaction, PopulatedTransaction, Transaction, TransactionId, TransactionInput, TransactionOutpoint,
+        TransactionOutput, UtxoEntry, VerifiableTransaction,
+    },
+};
+use kaspa_txscript::{
+    caches::Cache,
+    opcodes::codes::{
+        OpCheckSig, OpCheckSigVerify, OpDup, OpElse, OpEndIf, OpEqualVerify, OpFalse, OpGreaterThanOrEqual, OpIf, OpSub, OpTrue,
+        OpTxInputAmount, OpTxInputIndex, OpTxInputSpk, OpTxOutputAmount, OpTxOutputSpk,
+    },
+    pay_to_address_script, pay_to_script_hash_script,
+    script_builder::{ScriptBuilder, ScriptBuilderResult},
+    TxScriptEngine,
+};
+use kaspa_txscript_errors::TxScriptError::{EvalFalse, VerifyError};
+use rand::thread_rng;
+use secp256k1::Keypair;
+
+/// Main function to execute all Kaspa transaction script scenarios.
+///
+/// # Returns
+///
+/// * `ScriptBuilderResult<()>` - Result of script builder operations for all scenarios.
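+///
+/// # Usage
+///
+/// Assuming the `[[example]]` registration added to `crypto/txscript/Cargo.toml`
+/// above, this walkthrough can be run directly with `cargo run --example kip-10`.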
+fn main() -> ScriptBuilderResult<()> { + threshold_scenario()?; + threshold_scenario_limited_one_time()?; + threshold_scenario_limited_2_times()?; + shared_secret_scenario()?; + Ok(()) +} + +/// # Standard Threshold Scenario +/// +/// This scenario demonstrates the use of custom opcodes and script execution within the Kaspa blockchain ecosystem. +/// There are two main cases: +/// +/// 1. **Owner case:** The script checks if the input is used by the owner and verifies the owner's signature. +/// 2. **Borrower case:** The script allows the input to be consumed if the output with the same index has a value of input + threshold and goes to the P2SH of the script itself. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario. +fn threshold_scenario() -> ScriptBuilderResult<()> { + println!("\n[STANDARD] Running standard threshold scenario"); + // Create a new key pair for the owner + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + + // Set a threshold value for comparison + let threshold: i64 = 100; + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create the script builder + let mut builder = ScriptBuilder::new(); + let script = builder + // Owner branch + .add_op(OpIf)? + .add_data(owner.x_only_public_key().0.serialize().as_slice())? + .add_op(OpCheckSig)? + // Borrower branch + .add_op(OpElse)? + .add_ops(&[OpTxInputIndex, OpTxInputSpk, OpTxInputIndex, OpTxOutputSpk, OpEqualVerify, OpTxInputIndex, OpTxOutputAmount])? + .add_i64(threshold)? + .add_ops(&[OpSub, OpTxInputIndex, OpTxInputAmount, OpGreaterThanOrEqual])? + .add_op(OpEndIf)? 
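+        // Taken together, the borrower branch above asserts, for the input currently
+        // being validated (i = OpTxInputIndex):
+        //   spk(output[i]) == spk(input[i])                      (OpEqualVerify), and
+        //   amount(output[i]) - threshold >= amount(input[i])    (OpGreaterThanOrEqual).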
+ .drain(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: 1000000000 + threshold as u64, script_public_key: spk.clone() }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let mut tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + + // Check owner branch + { + println!("[STANDARD] Checking owner branch"); + let mut tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = owner.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[STANDARD] Owner branch execution successful"); + } + + // Check borrower branch + { + println!("[STANDARD] Checking borrower branch"); + tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[STANDARD] Borrower branch execution successful"); + } + + // Check borrower branch with threshold not reached + { + println!("[STANDARD] Checking borrower branch with threshold not reached"); + // Less than threshold + tx.outputs[0].value -= 1; + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(EvalFalse)); + println!("[STANDARD] Borrower branch with threshold not reached failed as expected"); + } + + println!("[STANDARD] Standard threshold scenario completed successfully"); + Ok(()) +} + +/// Generate a script for limited-time borrowing scenarios +/// +/// This function creates a script that allows for limited-time borrowing with a threshold, +/// or spending by the owner at any time. It's generic enough to be used for both one-time +/// and multi-time borrowing scenarios. 
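+///
+/// As a rough sketch, the emitted script has the same two-branch shape as the
+/// standard scenario, except that the borrower branch compares the output script
+/// public key against the caller-supplied `output_spk` rather than the input's own:
+///
+/// ```text
+/// OpIf
+///     <owner pubkey> OpCheckSig
+/// OpElse
+///     <output_spk> OpTxInputIndex OpTxOutputSpk OpEqualVerify
+///     OpTxInputIndex OpTxOutputAmount <threshold> OpSub
+///     OpTxInputIndex OpTxInputAmount OpGreaterThanOrEqual
+/// OpEndIf
+/// ```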
+///
+/// # Arguments
+///
+/// * `owner` - The public key of the owner
+/// * `threshold` - The threshold amount that must be met for borrowing
+/// * `output_spk` - The output script public key as a vector of bytes
+///
+/// # Returns
+///
+/// * The generated script as a vector of bytes
+fn generate_limited_time_script(owner: &Keypair, threshold: i64, output_spk: Vec<u8>) -> ScriptBuilderResult<Vec<u8>> {
+    let mut builder = ScriptBuilder::new();
+    let script = builder
+        // Owner branch
+        .add_op(OpIf)?
+        .add_data(owner.x_only_public_key().0.serialize().as_slice())?
+        .add_op(OpCheckSig)?
+        // Borrower branch
+        .add_op(OpElse)?
+        .add_data(&output_spk)?
+        .add_ops(&[OpTxInputIndex, OpTxOutputSpk, OpEqualVerify, OpTxInputIndex, OpTxOutputAmount])?
+        .add_i64(threshold)?
+        .add_ops(&[OpSub, OpTxInputIndex, OpTxInputAmount, OpGreaterThanOrEqual])?
+        .add_op(OpEndIf)?
+        .drain();
+
+    Ok(script)
+}
+
+// Helper function to create P2PK script as a vector
+fn p2pk_as_vec(owner: &Keypair) -> Vec<u8> {
+    let p2pk =
+        pay_to_address_script(&Address::new(Prefix::Mainnet, Version::PubKey, owner.x_only_public_key().0.serialize().as_slice()));
+    let version = p2pk.version.to_be_bytes();
+    let script = p2pk.script();
+    let mut v = Vec::with_capacity(version.len() + script.len());
+    v.extend_from_slice(&version);
+    v.extend_from_slice(script);
+    v
+}
+
+/// # Threshold Scenario with Limited One-Time Borrowing
+///
+/// This function demonstrates a modified version of the threshold scenario where borrowing
+/// is limited to a single occurrence. The key difference from the standard threshold scenario
+/// is that the output goes to a Pay-to-Public-Key (P2PK) address instead of a Pay-to-Script-Hash (P2SH)
+/// address of the script itself.
+///
+/// ## Key Features:
+/// 1. **One-Time Borrowing:** The borrower can only use this mechanism once, as the funds are
+///    sent to a regular P2PK address instead of back to the script.
+/// 2. **Owner Access:** The owner retains the ability to spend the funds at any time using their private key.
+/// 3. **Threshold Mechanism:** The borrower must still meet the threshold requirement to spend the funds.
+/// 4. **Output Validation:** Ensures the output goes to the correct address.
+///
+/// ## Scenarios Tested:
+/// 1. **Owner Spending:** Verifies that the owner can spend the funds using their signature.
+/// 2. **Borrower Spending:** Checks if the borrower can spend when meeting the threshold and
+///    sending to the correct P2PK address.
+/// 3. **Invalid Borrower Attempt (Threshold):** Ensures the script fails if the borrower doesn't meet the threshold.
+/// 4. **Invalid Borrower Attempt (Wrong Output):** Ensures the script fails if the output goes to an incorrect address.
+///
+/// # Returns
+///
+/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario.
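+///
+/// In every borrower case below, the unlocking (signature) script is simply `OpFalse`
+/// followed by a push of the full script bytes (the P2SH preimage), which selects the
+/// borrower branch:
+///
+/// ```ignore
+/// ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain()
+/// ```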
+fn threshold_scenario_limited_one_time() -> ScriptBuilderResult<()> { + println!("\n[ONE-TIME] Running threshold one-time scenario"); + // Create a new key pair for the owner + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + + // Set a threshold value for comparison + let threshold: i64 = 100; + + let p2pk = + pay_to_address_script(&Address::new(Prefix::Mainnet, Version::PubKey, owner.x_only_public_key().0.serialize().as_slice())); + let p2pk_vec = p2pk_as_vec(&owner); + let script = generate_limited_time_script(&owner, threshold, p2pk_vec.clone())?; + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: 1000000000 + threshold as u64, script_public_key: p2pk.clone() }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let mut tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + + // Check owner branch + { + println!("[ONE-TIME] Checking owner branch"); + let mut tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = owner.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[ONE-TIME] Owner branch execution successful"); + } + + // Check borrower branch + { + println!("[ONE-TIME] Checking borrower branch"); + tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[ONE-TIME] Borrower branch execution successful"); + } + + // Check borrower branch with threshold not reached + { + println!("[ONE-TIME] Checking borrower branch with threshold not reached"); + // Less than threshold + tx.outputs[0].value -= 1; + let tx = PopulatedTransaction::new(&tx, 
vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(EvalFalse)); + println!("[ONE-TIME] Borrower branch with threshold not reached failed as expected"); + } + + // Check borrower branch with output going to wrong address + { + println!("[ONE-TIME] Checking borrower branch with output going to wrong address"); + // Create a new key pair for a different address + let wrong_recipient = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let wrong_p2pk = pay_to_address_script(&Address::new( + Prefix::Mainnet, + Version::PubKey, + wrong_recipient.x_only_public_key().0.serialize().as_slice(), + )); + + // Create a new transaction with the wrong output address + let mut wrong_tx = tx.clone(); + wrong_tx.outputs[0].script_public_key = wrong_p2pk; + wrong_tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); + + let wrong_tx = PopulatedTransaction::new(&wrong_tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input( + &wrong_tx, + &wrong_tx.tx.inputs[0], + 0, + &utxo_entry, + &reused_values, + &sig_cache, + true, + ); + assert_eq!(vm.execute(), Err(VerifyError)); + println!("[ONE-TIME] Borrower branch with output going to wrong address failed as expected"); + } + + println!("[ONE-TIME] Threshold one-time scenario completed successfully"); + Ok(()) +} + +/// # Threshold Scenario with Limited Two-Times Borrowing +/// +/// This function demonstrates a modified version of the threshold scenario where borrowing +/// is limited to two occurrences. The key difference from the one-time scenario is that +/// the first borrowing outputs to a P2SH of the one-time script, allowing for a second borrowing. +/// +/// ## Key Features: +/// 1. **Two-Times Borrowing:** The borrower can use this mechanism twice. +/// 2. **Owner Access:** The owner retains the ability to spend the funds at any time using their private key. +/// 3. **Threshold Mechanism:** The borrower must still meet the threshold requirement to spend the funds. +/// 4. **Output Validation:** Ensures the output goes to the correct address (P2SH of one-time script for first borrow). +/// +/// ## Scenarios Tested: +/// 1. **Owner Spending:** Verifies that the owner can spend the funds using their signature. +/// 2. **Borrower First Spending:** Checks if the borrower can spend when meeting the threshold and +/// sending to the correct P2SH address of the one-time script. +/// 3. **Invalid Borrower Attempt (Threshold):** Ensures the script fails if the borrower doesn't meet the threshold. +/// 4. **Invalid Borrower Attempt (Wrong Output):** Ensures the script fails if the output goes to an incorrect address. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario. 
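+///
+/// Structurally, the first borrow must pay into `P2SH(one_time_script)`, and the
+/// one-time script in turn only pays out to a plain P2PK output, which is what
+/// bounds the total number of borrows at two.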
+fn threshold_scenario_limited_2_times() -> ScriptBuilderResult<()> { + println!("\n[TWO-TIMES] Running threshold two-times scenario"); + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let threshold: i64 = 100; + + // First, create the one-time script + let p2pk_vec = p2pk_as_vec(&owner); + let one_time_script = generate_limited_time_script(&owner, threshold, p2pk_vec)?; + + // Now, create the two-times script using the one-time script as output + let p2sh_one_time = pay_to_script_hash_script(&one_time_script); + let p2sh_one_time_vec = { + let version = p2sh_one_time.version.to_be_bytes(); + let script = p2sh_one_time.script(); + let mut v = Vec::with_capacity(version.len() + script.len()); + v.extend_from_slice(&version); + v.extend_from_slice(script); + v + }; + + let two_times_script = generate_limited_time_script(&owner, threshold, p2sh_one_time_vec)?; + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&two_times_script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: 1000000000 + threshold as u64, script_public_key: p2sh_one_time }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&two_times_script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let mut tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + + // Check owner branch + { + println!("[TWO-TIMES] Checking owner branch"); + let mut tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = owner.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&two_times_script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[TWO-TIMES] Owner branch execution successful"); + } + + // Check borrower branch (first borrowing) + { + println!("[TWO-TIMES] Checking borrower branch (first borrowing)"); + tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&two_times_script)?.drain(); + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, 
&reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[TWO-TIMES] Borrower branch (first borrowing) execution successful"); + } + + // Check borrower branch with threshold not reached + { + println!("[TWO-TIMES] Checking borrower branch with threshold not reached"); + // Less than threshold + tx.outputs[0].value -= 1; + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(EvalFalse)); + println!("[TWO-TIMES] Borrower branch with threshold not reached failed as expected"); + } + + // Check borrower branch with output going to wrong address + { + println!("[TWO-TIMES] Checking borrower branch with output going to wrong address"); + // Create a new key pair for a different address + let wrong_recipient = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let wrong_p2pk = pay_to_address_script(&Address::new( + Prefix::Mainnet, + Version::PubKey, + wrong_recipient.x_only_public_key().0.serialize().as_slice(), + )); + + // Create a new transaction with the wrong output address + let mut wrong_tx = tx.clone(); + wrong_tx.outputs[0].script_public_key = wrong_p2pk; + wrong_tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&two_times_script)?.drain(); + + let wrong_tx = PopulatedTransaction::new(&wrong_tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input( + &wrong_tx, + &wrong_tx.tx.inputs[0], + 0, + &utxo_entry, + &reused_values, + &sig_cache, + true, + ); + assert_eq!(vm.execute(), Err(VerifyError)); + println!("[TWO-TIMES] Borrower branch with output going to wrong address failed as expected"); + } + + println!("[TWO-TIMES] Threshold two-times scenario completed successfully"); + Ok(()) +} + +/// # Shared Secret Scenario +/// +/// This scenario demonstrates the use of a shared secret within the Kaspa blockchain ecosystem. +/// Instead of using a threshold value, it checks the shared secret and the signature associated with it. +/// +/// ## Key Features: +/// 1. **Owner Access:** The owner can spend funds at any time using their signature. +/// 2. **Shared Secret:** A separate keypair is used as a shared secret for borrower access. +/// 3. **Borrower Verification:** The borrower must provide the correct shared secret signature to spend. +/// +/// ## Scenarios Tested: +/// 1. **Owner Spending:** Verifies that the owner can spend the funds using their signature. +/// 2. **Borrower with Correct Secret:** Checks if the borrower can spend when providing the correct shared secret. +/// 3. **Borrower with Incorrect Secret:** Ensures the script fails if the borrower uses an incorrect secret. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario. +fn shared_secret_scenario() -> ScriptBuilderResult<()> { + println!("\n[SHARED-SECRET] Running shared secret scenario"); + + // Create key pairs for the owner, shared secret, and a potential borrower + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let shared_secret_kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let borrower_kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Create the script builder + let mut builder = ScriptBuilder::new(); + let script = builder + // Owner branch + .add_op(OpIf)? 
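+        // Owner branch: the unlocking script supplies <sig> OpTrue, so execution
+        // lands here and verifies the owner's Schnorr signature.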
+ .add_data(owner.x_only_public_key().0.serialize().as_slice())? + .add_op(OpCheckSig)? + // Borrower branch + .add_op(OpElse)? + .add_op(OpDup)? + .add_data(shared_secret_kp.x_only_public_key().0.serialize().as_slice())? + .add_op(OpEqualVerify)? + .add_op(OpCheckSigVerify)? + .add_ops(&[OpTxInputIndex, OpTxInputSpk, OpTxInputIndex, OpTxOutputSpk, OpEqualVerify, OpTxInputIndex, OpTxOutputAmount, OpTxInputIndex, OpTxInputAmount, OpGreaterThanOrEqual])? + .add_op(OpEndIf)? + .drain(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: input_value, script_public_key: spk.clone() }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + let sign = |pk: Keypair| { + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + let tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = pk.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + (tx, signature, reused_values) + }; + + // Check owner branch + { + println!("[SHARED-SECRET] Checking owner branch"); + let (mut tx, signature, reused_values) = sign(owner); + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[SHARED-SECRET] Owner branch execution successful"); + } + + // Check borrower branch with correct shared secret + { + println!("[SHARED-SECRET] Checking borrower branch with correct shared secret"); + let (mut tx, signature, reused_values) = sign(shared_secret_kp); + builder.add_data(&signature)?; + builder.add_data(shared_secret_kp.x_only_public_key().0.serialize().as_slice())?; + builder.add_op(OpFalse)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[SHARED-SECRET] Borrower branch with correct shared secret execution successful"); + } + + // Check borrower branch with incorrect secret + { + 
println!("[SHARED-SECRET] Checking borrower branch with incorrect secret"); + let (mut tx, signature, reused_values) = sign(borrower_kp); + builder.add_data(&signature)?; + builder.add_data(borrower_kp.x_only_public_key().0.serialize().as_slice())?; + builder.add_op(OpFalse)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(VerifyError)); + println!("[SHARED-SECRET] Borrower branch with incorrect secret failed as expected"); + } + + println!("[SHARED-SECRET] Shared secret scenario completed successfully"); + Ok(()) +} diff --git a/crypto/txscript/src/data_stack.rs b/crypto/txscript/src/data_stack.rs index 5d8ea18ed6..cb5935bbbd 100644 --- a/crypto/txscript/src/data_stack.rs +++ b/crypto/txscript/src/data_stack.rs @@ -1,11 +1,15 @@ use crate::TxScriptError; use core::fmt::Debug; use core::iter; +use kaspa_txscript_errors::SerializationError; +use std::cmp::Ordering; +use std::ops::Deref; const DEFAULT_SCRIPT_NUM_LEN: usize = 4; +const DEFAULT_SCRIPT_NUM_LEN_KIP10: usize = 8; #[derive(PartialEq, Eq, Debug, Default)] -pub(crate) struct SizedEncodeInt(i64); +pub(crate) struct SizedEncodeInt(pub(crate) i64); pub(crate) type Stack = Vec>; @@ -19,7 +23,7 @@ pub(crate) trait DataStack { Vec: OpcodeData; fn pop_raw(&mut self) -> Result<[Vec; SIZE], TxScriptError>; fn peek_raw(&self) -> Result<[Vec; SIZE], TxScriptError>; - fn push_item(&mut self, item: T) + fn push_item(&mut self, item: T) -> Result<(), TxScriptError> where Vec: OpcodeData; fn drop_items(&mut self) -> Result<(), TxScriptError>; @@ -31,7 +35,9 @@ pub(crate) trait DataStack { pub(crate) trait OpcodeData { fn deserialize(&self) -> Result; - fn serialize(from: &T) -> Self; + fn serialize(from: &T) -> Result + where + Self: Sized; } fn check_minimal_data_encoding(v: &[u8]) -> Result<(), TxScriptError> { @@ -59,6 +65,36 @@ fn check_minimal_data_encoding(v: &[u8]) -> Result<(), TxScriptError> { Ok(()) } +#[inline] +fn serialize_i64(from: &i64) -> Vec { + let sign = from.signum(); + let mut positive = from.unsigned_abs(); + let mut last_saturated = false; + let mut number_vec: Vec = iter::from_fn(move || { + if positive == 0 { + if last_saturated { + last_saturated = false; + Some(0) + } else { + None + } + } else { + let value = positive & 0xff; + last_saturated = (value & 0x80) != 0; + positive >>= 8; + Some(value as u8) + } + }) + .collect(); + if sign == -1 { + match number_vec.last_mut() { + Some(num) => *num |= 0x80, + _ => unreachable!(), + } + } + number_vec +} + fn deserialize_i64(v: &[u8]) -> Result { match v.len() { l if l > size_of::() => { @@ -75,6 +111,59 @@ fn deserialize_i64(v: &[u8]) -> Result { } } +#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd)] +#[repr(transparent)] +pub struct Kip10I64(pub i64); + +impl From for i64 { + fn from(value: Kip10I64) -> Self { + value.0 + } +} + +impl PartialEq for Kip10I64 { + fn eq(&self, other: &i64) -> bool { + self.0.eq(other) + } +} + +impl PartialOrd for Kip10I64 { + fn partial_cmp(&self, other: &i64) -> Option { + self.0.partial_cmp(other) + } +} + +impl Deref for Kip10I64 { + type Target = i64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl OpcodeData for Vec { + #[inline] + fn deserialize(&self) -> Result { + match self.len() > DEFAULT_SCRIPT_NUM_LEN_KIP10 { + true => Err(TxScriptError::NumberTooBig(format!( + "numeric 
+                "numeric value encoded as {:x?} is {} bytes which exceeds the max allowed of {}",
+                self,
+                self.len(),
+                DEFAULT_SCRIPT_NUM_LEN_KIP10
+            ))),
+            false => deserialize_i64(self).map(Kip10I64),
+        }
+    }
+
+    #[inline]
+    fn serialize(from: &Kip10I64) -> Result<Self, SerializationError> {
+        if from.0 == i64::MIN {
+            return Err(SerializationError::NumberTooLong(from.0));
+        }
+        Ok(serialize_i64(&from.0))
+    }
+}
+
 impl OpcodeData<i64> for Vec<u8> {
     #[inline]
     fn deserialize(&self) -> Result<i64, TxScriptError> {
@@ -90,33 +179,11 @@ impl OpcodeData<i64> for Vec<u8> {
     }
 
     #[inline]
-    fn serialize(from: &i64) -> Self {
-        let sign = from.signum();
-        let mut positive = from.abs();
-        let mut last_saturated = false;
-        let mut number_vec: Vec<u8> = iter::from_fn(move || {
-            if positive == 0 {
-                if last_saturated {
-                    last_saturated = false;
-                    Some(0)
-                } else {
-                    None
-                }
-            } else {
-                let value = positive & 0xff;
-                last_saturated = (value & 0x80) != 0;
-                positive >>= 8;
-                Some(value as u8)
-            }
-        })
-        .collect();
-        if sign == -1 {
-            match number_vec.last_mut() {
-                Some(num) => *num |= 0x80,
-                _ => unreachable!(),
-            }
+    fn serialize(from: &i64) -> Result<Self, SerializationError> {
+        if from == &i64::MIN {
+            return Err(SerializationError::NumberTooLong(*from));
         }
-        number_vec
+        Ok(serialize_i64(from))
     }
 }
 
@@ -124,13 +191,14 @@ impl OpcodeData<i32> for Vec<u8> {
     #[inline]
     fn deserialize(&self) -> Result<i32, TxScriptError> {
         let res = OpcodeData::<i64>::deserialize(self)?;
-        i32::try_from(res.clamp(i32::MIN as i64, i32::MAX as i64))
-            .map_err(|e| TxScriptError::InvalidState(format!("data is too big for `i32`: {e}")))
+        // TODO: Consider getting rid of clamp, since the call to deserialize should return an error
+        // if the number is not in the i32 range (this should be done with proper testing)?
+        Ok(res.clamp(i32::MIN as i64, i32::MAX as i64) as i32)
     }
 
     #[inline]
-    fn serialize(from: &i32) -> Self {
-        OpcodeData::<i64>::serialize(&(*from as i64))
+    fn serialize(from: &i32) -> Result<Self, SerializationError> {
+        Ok(OpcodeData::<i64>::serialize(&(*from as i64)).expect("should never happen"))
     }
 }
 
@@ -142,15 +210,15 @@ impl<const LEN: usize> OpcodeData<SizedEncodeInt<LEN>> for Vec<u8> {
                 "numeric value encoded as {:x?} is {} bytes which exceeds the max allowed of {}",
                 self,
                 self.len(),
-                DEFAULT_SCRIPT_NUM_LEN
+                LEN
             ))),
             false => deserialize_i64(self).map(SizedEncodeInt::<LEN>),
         }
     }
 
     #[inline]
-    fn serialize(from: &SizedEncodeInt<LEN>) -> Self {
-        OpcodeData::<i64>::serialize(&from.0)
+    fn serialize(from: &SizedEncodeInt<LEN>) -> Result<Self, SerializationError> {
+        Ok(serialize_i64(&from.0))
     }
 }
 
@@ -166,11 +234,11 @@ impl OpcodeData<bool> for Vec<u8> {
     }
 
     #[inline]
-    fn serialize(from: &bool) -> Self {
-        match from {
+    fn serialize(from: &bool) -> Result<Self, SerializationError> {
+        Ok(match from {
             true => vec![1],
             false => vec![],
-        }
+        })
     }
 }
 
@@ -216,11 +284,13 @@ impl DataStack for Stack {
     }
 
     #[inline]
-    fn push_item<T: Debug>(&mut self, item: T)
+    fn push_item<T: Debug>(&mut self, item: T) -> Result<(), TxScriptError>
     where
         Vec<u8>: OpcodeData<T>,
     {
-        Vec::push(self, OpcodeData::serialize(&item));
+        let v = OpcodeData::serialize(&item)?;
+        Vec::push(self, v);
+        Ok(())
     }
 
     #[inline]
@@ -283,9 +353,9 @@ impl DataStack for Stack {
 
 #[cfg(test)]
 mod tests {
-    use super::OpcodeData;
+    use super::{Kip10I64, OpcodeData};
     use crate::data_stack::SizedEncodeInt;
-    use kaspa_txscript_errors::TxScriptError;
+    use kaspa_txscript_errors::{SerializationError, TxScriptError};
 
     // TestScriptNumBytes
     #[test]
@@ -322,7 +392,7 @@ mod tests {
             TestCase { num: 2147483647, serialized: hex::decode("ffffff7f").expect("failed parsing hex") },
             TestCase { num: -2147483647, serialized: hex::decode("ffffffff").expect("failed parsing hex") },
             // Values that are out of range for data that is interpreted as
-            // numbers, but are allowed as the result of numeric operations.
+            // numbers before KIP-10 enabled, but are allowed as the result of numeric operations.
             TestCase { num: 2147483648, serialized: hex::decode("0000008000").expect("failed parsing hex") },
             TestCase { num: -2147483648, serialized: hex::decode("0000008080").expect("failed parsing hex") },
             TestCase { num: 2415919104, serialized: hex::decode("0000009000").expect("failed parsing hex") },
@@ -340,9 +410,13 @@
         ];
 
         for test in tests {
-            let serialized: Vec<u8> = OpcodeData::<i64>::serialize(&test.num);
+            let serialized: Vec<u8> = OpcodeData::<i64>::serialize(&test.num).unwrap();
             assert_eq!(serialized, test.serialized);
         }
+
+        // special case 9-byte i64
+        let r: Result<Vec<u8>, _> = OpcodeData::<i64>::serialize(&-9223372036854775808);
+        assert_eq!(r, Err(SerializationError::NumberTooLong(-9223372036854775808)));
     }
 
     // TestMakeScriptNum
@@ -537,7 +611,73 @@ mod tests {
             }, // 7340032
             // Values above 8 bytes should always return error
         ];
-
+        let kip10_tests = vec![
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("0000008000").expect("failed parsing hex"),
+                result: Ok(Kip10I64(2147483648)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("0000008080").expect("failed parsing hex"),
+                result: Ok(Kip10I64(-2147483648)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("0000009000").expect("failed parsing hex"),
+                result: Ok(Kip10I64(2415919104)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("0000009080").expect("failed parsing hex"),
+                result: Ok(Kip10I64(-2415919104)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffff00").expect("failed parsing hex"),
+                result: Ok(Kip10I64(4294967295)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffff80").expect("failed parsing hex"),
+                result: Ok(Kip10I64(-4294967295)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("0000000001").expect("failed parsing hex"),
+                result: Ok(Kip10I64(4294967296)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("0000000081").expect("failed parsing hex"),
+                result: Ok(Kip10I64(-4294967296)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffffffff00").expect("failed parsing hex"),
+                result: Ok(Kip10I64(281474976710655)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffffffff80").expect("failed parsing hex"),
+                result: Ok(Kip10I64(-281474976710655)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffffffffff00").expect("failed parsing hex"),
+                result: Ok(Kip10I64(72057594037927935)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffffffffff80").expect("failed parsing hex"),
+                result: Ok(Kip10I64(-72057594037927935)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffffffffff7f").expect("failed parsing hex"),
+                result: Ok(Kip10I64(9223372036854775807)),
+            },
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("ffffffffffffffff").expect("failed parsing hex"),
+                result: Ok(Kip10I64(-9223372036854775807)),
+            },
+            // Minimally encoded values that are out of range for data that
+            // is interpreted as script numbers with the minimal encoding
+            // flag set. Should error and return 0.
+            TestCase::<Kip10I64> {
+                serialized: hex::decode("000000000000008080").expect("failed parsing hex"),
+                result: Err(TxScriptError::NumberTooBig(
+                    "numeric value encoded as [0, 0, 0, 0, 0, 0, 0, 80, 80] is 9 bytes which exceeds the max allowed of 8".to_string(),
+                )),
+            },
+        ];
         let test_of_size_5 = vec![
             TestCase::<SizedEncodeInt<5>> {
                 serialized: hex::decode("ffffffff7f").expect("failed parsing hex"),
@@ -633,5 +773,10 @@ mod tests {
             // code matches the value specified in the test instance.
assert_eq!(test.serialized.deserialize(), test.result); } + for test in kip10_tests { + // Ensure the error code is of the expected type and the error + // code matches the value specified in the test instance. + assert_eq!(test.serialized.deserialize(), test.result); + } } } diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index 5fed84328d..f36307a60a 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -68,7 +68,7 @@ pub struct SigCacheKey { } enum ScriptSource<'a, T: VerifiableTransaction> { - TxInput { tx: &'a T, input: &'a TransactionInput, id: usize, utxo_entry: &'a UtxoEntry, is_p2sh: bool }, + TxInput { tx: &'a T, input: &'a TransactionInput, idx: usize, utxo_entry: &'a UtxoEntry, is_p2sh: bool }, StandAloneScripts(Vec<&'a [u8]>), } @@ -85,6 +85,7 @@ pub struct TxScriptEngine<'a, T: VerifiableTransaction, Reused: SigHashReusedVal cond_stack: Vec, // Following if stacks, and whether it is running num_ops: i32, + kip10_enabled: bool, } fn parse_script( @@ -154,7 +155,7 @@ pub fn is_unspendable(scr } impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<'a, T, Reused> { - pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { + pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache, kip10_enabled: bool) -> Self { Self { dstack: vec![], astack: vec![], @@ -163,9 +164,26 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' sig_cache, cond_stack: vec![], num_ops: 0, + kip10_enabled, } } + /// Creates a new Script Engine for validating transaction input. + /// + /// # Arguments + /// * `tx` - The transaction being validated + /// * `input` - The input being validated + /// * `input_idx` - Index of the input in the transaction + /// * `utxo_entry` - UTXO entry being spent + /// * `reused_values` - Reused values for signature hashing + /// * `sig_cache` - Cache for signature verification + /// * `kip10_enabled` - Whether KIP-10 transaction introspection opcodes are enabled + /// + /// # Panics + /// * When input_idx >= number of inputs in transaction (malformed input) + /// + /// # Returns + /// Script engine instance configured for the given input pub fn from_transaction_input( tx: &'a T, input: &'a TransactionInput, @@ -173,26 +191,31 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' utxo_entry: &'a UtxoEntry, reused_values: &'a Reused, sig_cache: &'a Cache, - ) -> Result { + kip10_enabled: bool, + ) -> Self { let script_public_key = utxo_entry.script_public_key.script(); // The script_public_key in P2SH is just validating the hash on the OpMultiSig script // the user provides let is_p2sh = ScriptClass::is_pay_to_script_hash(script_public_key); - match input_idx < tx.tx().inputs.len() { - true => Ok(Self { - dstack: Default::default(), - astack: Default::default(), - script_source: ScriptSource::TxInput { tx, input, id: input_idx, utxo_entry, is_p2sh }, - reused_values, - sig_cache, - cond_stack: Default::default(), - num_ops: 0, - }), - false => Err(TxScriptError::InvalidIndex(input_idx, tx.tx().inputs.len())), + assert!(input_idx < tx.tx().inputs.len()); + Self { + dstack: Default::default(), + astack: Default::default(), + script_source: ScriptSource::TxInput { tx, input, idx: input_idx, utxo_entry, is_p2sh }, + reused_values, + sig_cache, + cond_stack: Default::default(), + num_ops: 0, + kip10_enabled, } } - pub fn from_script(script: &'a [u8], reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { + pub fn from_script( + script: 
&'a [u8], + reused_values: &'a Reused, + sig_cache: &'a Cache, + kip10_enabled: bool, + ) -> Self { Self { dstack: Default::default(), astack: Default::default(), @@ -201,6 +224,7 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' sig_cache, cond_stack: Default::default(), num_ops: 0, + kip10_enabled, } } @@ -300,7 +324,7 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' // each is successful scripts.iter().enumerate().filter(|(_, s)| !s.is_empty()).try_for_each(|(idx, s)| { let verify_only_push = - idx == 0 && matches!(self.script_source, ScriptSource::TxInput { tx: _, input: _, id: _, utxo_entry: _, is_p2sh: _ }); + idx == 0 && matches!(self.script_source, ScriptSource::TxInput { tx: _, input: _, idx: _, utxo_entry: _, is_p2sh: _ }); // Save script in p2sh if is_p2sh && idx == 1 { saved_stack = Some(self.dstack.clone()); @@ -437,21 +461,21 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' return Err(TxScriptError::NullFail); } - self.dstack.push_item(!failed); + self.dstack.push_item(!failed)?; Ok(()) } #[inline] fn check_schnorr_signature(&mut self, hash_type: SigHashType, key: &[u8], sig: &[u8]) -> Result { match self.script_source { - ScriptSource::TxInput { tx, id, .. } => { + ScriptSource::TxInput { tx, idx, .. } => { if sig.len() != 64 { return Err(TxScriptError::SigLength(sig.len())); } Self::check_pub_key_encoding(key)?; let pk = secp256k1::XOnlyPublicKey::from_slice(key).map_err(TxScriptError::InvalidSignature)?; let sig = secp256k1::schnorr::Signature::from_slice(sig).map_err(TxScriptError::InvalidSignature)?; - let sig_hash = calc_schnorr_signature_hash(tx, id, hash_type, self.reused_values); + let sig_hash = calc_schnorr_signature_hash(tx, idx, hash_type, self.reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig_cache_key = SigCacheKey { signature: Signature::Secp256k1(sig), pub_key: PublicKey::Schnorr(pk), message: msg }; @@ -479,14 +503,14 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' fn check_ecdsa_signature(&mut self, hash_type: SigHashType, key: &[u8], sig: &[u8]) -> Result { match self.script_source { - ScriptSource::TxInput { tx, id, .. } => { + ScriptSource::TxInput { tx, idx, .. 
} => { if sig.len() != 64 { return Err(TxScriptError::SigLength(sig.len())); } Self::check_pub_key_encoding_ecdsa(key)?; let pk = secp256k1::PublicKey::from_slice(key).map_err(TxScriptError::InvalidSignature)?; let sig = secp256k1::ecdsa::Signature::from_compact(sig).map_err(TxScriptError::InvalidSignature)?; - let sig_hash = calc_ecdsa_signature_hash(tx, id, hash_type, self.reused_values); + let sig_hash = calc_ecdsa_signature_hash(tx, idx, hash_type, self.reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig_cache_key = SigCacheKey { signature: Signature::Ecdsa(sig), pub_key: PublicKey::Ecdsa(pk), message: msg }; @@ -512,6 +536,16 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' } } +trait SpkEncoding { + fn to_bytes(&self) -> Vec; +} + +impl SpkEncoding for ScriptPublicKey { + fn to_bytes(&self) -> Vec { + self.version.to_be_bytes().into_iter().chain(self.script().iter().copied()).collect() + } +} + #[cfg(test)] mod tests { use std::iter::once; @@ -546,6 +580,10 @@ mod tests { fn populated_input(&self, _index: usize) -> (&TransactionInput, &UtxoEntry) { unimplemented!() } + + fn utxo(&self, _index: usize) -> Option<&UtxoEntry> { + unimplemented!() + } } fn run_test_script_cases(test_cases: Vec) { @@ -572,10 +610,18 @@ mod tests { let utxo_entry = UtxoEntry::new(output.value, output.script_public_key.clone(), 0, tx.is_coinbase()); let populated_tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - - let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) - .expect("Script creation failed"); - assert_eq!(vm.execute(), test.expected_result); + [false, true].into_iter().for_each(|kip10_enabled| { + let mut vm = TxScriptEngine::from_transaction_input( + &populated_tx, + &input, + 0, + &utxo_entry, + &reused_values, + &sig_cache, + kip10_enabled, + ); + assert_eq!(vm.execute(), test.expected_result); + }); } } @@ -999,7 +1045,7 @@ mod bitcoind_tests { } impl JsonTestRow { - fn test_row(&self) -> Result<(), TestError> { + fn test_row(&self, kip10_enabled: bool) -> Result<(), TestError> { // Parse test to objects let (sig_script, script_pub_key, expected_result) = match self.clone() { JsonTestRow::Test(sig_script, sig_pub_key, _, expected_result) => (sig_script, sig_pub_key, expected_result), @@ -1011,7 +1057,7 @@ mod bitcoind_tests { } }; - let result = Self::run_test(sig_script, script_pub_key); + let result = Self::run_test(sig_script, script_pub_key, kip10_enabled); match Self::result_name(result.clone()).contains(&expected_result.as_str()) { true => Ok(()), @@ -1019,7 +1065,7 @@ mod bitcoind_tests { } } - fn run_test(sig_script: String, script_pub_key: String) -> Result<(), UnifiedError> { + fn run_test(sig_script: String, script_pub_key: String, kip10_enabled: bool) -> Result<(), UnifiedError> { let script_sig = opcodes::parse_short_form(sig_script).map_err(UnifiedError::ScriptBuilderError)?; let script_pub_key = ScriptPublicKey::from_vec(0, opcodes::parse_short_form(script_pub_key).map_err(UnifiedError::ScriptBuilderError)?); @@ -1039,8 +1085,8 @@ mod bitcoind_tests { &populated_tx.entries[0], &reused_values, &sig_cache, - ) - .map_err(UnifiedError::TxScriptError)?; + kip10_enabled, + ); vm.execute().map_err(UnifiedError::TxScriptError) } @@ -1073,6 +1119,7 @@ mod bitcoind_tests { Err(ue) => match ue { UnifiedError::TxScriptError(e) => match e { TxScriptError::NumberTooBig(_) => vec!["UNKNOWN_ERROR"], + 
TxScriptError::Serialization(_) => vec!["UNKNOWN_ERROR"], TxScriptError::PubKeyFormat => vec!["PUBKEYFORMAT"], TxScriptError::EvalFalse => vec!["EVAL_FALSE"], TxScriptError::EmptyStack => { @@ -1119,22 +1166,47 @@ mod bitcoind_tests { #[test] fn test_bitcoind_tests() { - let file = File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data").join("script_tests.json")) - .expect("Could not find test file"); - let reader = BufReader::new(file); - - // Read the JSON contents of the file as an instance of `User`. - let tests: Vec = serde_json::from_reader(reader).expect("Failed Parsing {:?}"); - let mut had_errors = 0; - let total_tests = tests.len(); - for row in tests { - if let Err(error) = row.test_row() { - println!("Test: {:?} failed: {:?}", row.clone(), error); - had_errors += 1; + // Script test files are split into two versions to test behavior before and after KIP-10: + // + // - script_tests.json: Tests basic script functionality with KIP-10 disabled (kip10_enabled=false) + // - script_tests-kip10.json: Tests expanded functionality with KIP-10 enabled (kip10_enabled=true) + // + // KIP-10 introduces two major changes: + // + // 1. Support for 8-byte integer arithmetic (previously limited to 4 bytes) + // This enables working with larger numbers in scripts and reduces artificial constraints + // + // 2. Transaction introspection opcodes: + // - OpTxInputCount (0xb3): Get number of inputs + // - OpTxOutputCount (0xb4): Get number of outputs + // - OpTxInputIndex (0xb9): Get current input index + // - OpTxInputAmount (0xbe): Get input amount + // - OpTxInputSpk (0xbf): Get input script public key + // - OpTxOutputAmount (0xc2): Get output amount + // - OpTxOutputSpk (0xc3): Get output script public key + // + // These changes were added to support mutual transactions and auto-compounding addresses. + // When KIP-10 is disabled (pre-activation), the new opcodes will return an InvalidOpcode error + // and arithmetic is limited to 4 bytes. When enabled, scripts gain full access to transaction + // data and 8-byte arithmetic capabilities. + for (file_name, kip10_enabled) in [("script_tests.json", false), ("script_tests-kip10.json", true)] { + let file = + File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data").join(file_name)).expect("Could not find test file"); + let reader = BufReader::new(file); + + // Read the JSON contents of the file as an instance of `User`. + let tests: Vec = serde_json::from_reader(reader).expect("Failed Parsing {:?}"); + let mut had_errors = 0; + let total_tests = tests.len(); + for row in tests { + if let Err(error) = row.test_row(kip10_enabled) { + println!("Test: {:?} failed: {:?}", row.clone(), error); + had_errors += 1; + } + } + if had_errors > 0 { + panic!("{}/{} json tests failed", had_errors, total_tests) } - } - if had_errors > 0 { - panic!("{}/{} json tests failed", had_errors, total_tests) } } } diff --git a/crypto/txscript/src/opcodes/macros.rs b/crypto/txscript/src/opcodes/macros.rs index c4d161d400..a4b3bfbbf4 100644 --- a/crypto/txscript/src/opcodes/macros.rs +++ b/crypto/txscript/src/opcodes/macros.rs @@ -132,7 +132,11 @@ macro_rules! 
opcode_list { let mut builder = ScriptBuilder::new(); for token in script.split_whitespace() { if let Ok(value) = token.parse::() { - builder.add_i64(value)?; + if value == i64::MIN { + builder.add_i64_min()?; + } else { + builder.add_i64(value)?; + } } else if let Some(Ok(value)) = token.strip_prefix("0x").and_then(|trimmed| Some(hex::decode(trimmed))) { builder.extend(&value); diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index f2a92fa0b5..c59bc27d91 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -1,18 +1,20 @@ #[macro_use] mod macros; -use crate::data_stack::{DataStack, OpcodeData}; use crate::{ - ScriptSource, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD, MAX_TX_IN_SEQUENCE_NUM, NO_COST_OPCODE, + data_stack::{DataStack, Kip10I64, OpcodeData}, + ScriptSource, SpkEncoding, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD, MAX_TX_IN_SEQUENCE_NUM, NO_COST_OPCODE, SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK, }; use blake2b_simd::Params; -use core::cmp::{max, min}; use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; use kaspa_consensus_core::hashing::sighash_type::SigHashType; use kaspa_consensus_core::tx::VerifiableTransaction; use sha2::{Digest, Sha256}; -use std::fmt::{Debug, Formatter}; +use std::{ + fmt::{Debug, Formatter}, + num::TryFromIntError, +}; /// First value in the range formed by the "small integer" Op# opcodes pub const OP_SMALL_INT_MIN_VAL: u8 = 1; @@ -210,10 +212,30 @@ fn push_number( number: i64, vm: &mut TxScriptEngine, ) -> OpCodeResult { - vm.dstack.push_item(number); + vm.dstack.push_item(number)?; Ok(()) } +/// This macro helps to avoid code duplication in numeric opcodes where the only difference +/// between KIP10_ENABLED and disabled states is the numeric type used (Kip10I64 vs i64). +/// KIP10I64 deserializator supports 8-byte integers +macro_rules! numeric_op { + ($vm: expr, $pattern: pat, $count: expr, $block: expr) => { + if $vm.kip10_enabled { + let $pattern: [Kip10I64; $count] = $vm.dstack.pop_items()?; + let r = $block; + $vm.dstack.push_item(r)?; + Ok(()) + } else { + let $pattern: [i64; $count] = $vm.dstack.pop_items()?; + #[allow(clippy::useless_conversion)] + let r = $block; + $vm.dstack.push_item(r)?; + Ok(()) + } + }; +} + /* The following is the implementation and metadata of all opcodes. Each opcode has unique number (and template system makes it impossible to use two opcodes), length specification, @@ -521,7 +543,7 @@ opcode_list! { opcode OpSize<0x82, 1>(self, vm) { match vm.dstack.last() { Some(last) => { - vm.dstack.push_item(i64::try_from(last.len()).map_err(|e| TxScriptError::NumberTooBig(e.to_string()))?); + vm.dstack.push_item(i64::try_from(last.len()).map_err(|e| TxScriptError::NumberTooBig(e.to_string()))?)?; Ok(()) }, None => Err(TxScriptError::InvalidStackOperation(1, 0)) @@ -566,54 +588,38 @@ opcode_list! { // Numeric related opcodes. opcode Op1Add<0x8b, 1>(self, vm) { - let [value]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(value + 1); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_add(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of addition exceeds 64-bit signed integer range".to_string()))?) } opcode Op1Sub<0x8c, 1>(self, vm) { - let [value]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(value - 1); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_sub(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of subtraction exceeds 64-bit signed integer range".to_string()))?) 
} opcode Op2Mul<0x8d, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode Op2Div<0x8e, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode OpNegate<0x8f, 1>(self, vm) { - let [value]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(-value); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_neg().ok_or_else(|| TxScriptError::NumberTooBig("Negation result exceeds 64-bit signed integer range".to_string()))?) } opcode OpAbs<0x90, 1>(self, vm) { - let [m]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(m.abs()); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_abs().ok_or_else(|| TxScriptError::NumberTooBig("Absolute value exceeds 64-bit signed integer range".to_string()))?) } opcode OpNot<0x91, 1>(self, vm) { - let [m]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item((m == 0) as i64); - Ok(()) + numeric_op!(vm, [m], 1, (m == 0) as i64) } opcode Op0NotEqual<0x92, 1>(self, vm) { - let [m]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item((m != 0) as i64 ); - Ok(()) + numeric_op!(vm, [m], 1, (m != 0) as i64) } opcode OpAdd<0x93, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(a+b); - Ok(()) + numeric_op!(vm, [a,b], 2, a.checked_add(b.into()).ok_or_else(|| TxScriptError::NumberTooBig("Sum exceeds 64-bit signed integer range".to_string()))?) } opcode OpSub<0x94, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(a-b); - Ok(()) + numeric_op!(vm, [a,b], 2, a.checked_sub(b.into()).ok_or_else(|| TxScriptError::NumberTooBig("Difference exceeds 64-bit signed integer range".to_string()))?) } opcode OpMul<0x95, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) @@ -623,77 +629,63 @@ opcode_list! 
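One remark on the checked_neg/checked_abs guards above: in two's complement exactly one value, i64::MIN, overflows under negation and absolute value. A minimally-encoded 8-byte stack number can never actually deserialize to i64::MIN (its encoding takes 9 bytes, as sketched earlier), so the guards are defensive, turning a cannot-happen case into an explicit error rather than relying on that invariant:

fn main() {
    // The single i64 whose negation and absolute value are unrepresentable:
    assert_eq!(i64::MIN.checked_neg(), None);
    assert_eq!(i64::MIN.checked_abs(), None);
    assert_eq!((-5i64).checked_abs(), Some(5));
    // Unchecked wrapping semantics would silently hand back i64::MIN:
    assert_eq!(i64::MIN.wrapping_neg(), i64::MIN);
}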
{ opcode OpRShift<0x99, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode OpBoolAnd<0x9a, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(((a != 0) && (b != 0)) as i64); - Ok(()) + numeric_op!(vm, [a,b], 2, ((a != 0) && (b != 0)) as i64) } opcode OpBoolOr<0x9b, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(((a != 0) || (b != 0)) as i64); - Ok(()) + numeric_op!(vm, [a,b], 2, ((a != 0) || (b != 0)) as i64) } opcode OpNumEqual<0x9c, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a == b) as i64); - Ok(()) + numeric_op!(vm, [a,b], 2, (a == b) as i64) } opcode OpNumEqualVerify<0x9d, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - match a == b { - true => Ok(()), - false => Err(TxScriptError::VerifyError) + if vm.kip10_enabled { + let [a,b]: [Kip10I64; 2] = vm.dstack.pop_items()?; + match a == b { + true => Ok(()), + false => Err(TxScriptError::VerifyError) + } + } else { + let [a,b]: [i64; 2] = vm.dstack.pop_items()?; + match a == b { + true => Ok(()), + false => Err(TxScriptError::VerifyError) + } } } opcode OpNumNotEqual<0x9e, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a != b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a != b) as i64) } opcode OpLessThan<0x9f, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a < b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a < b) as i64) } opcode OpGreaterThan<0xa0, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a > b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a > b) as i64) } opcode OpLessThanOrEqual<0xa1, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a <= b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a <= b) as i64) } opcode OpGreaterThanOrEqual<0xa2, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a >= b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a >= b) as i64) } opcode OpMin<0xa3, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(min(a,b)); - Ok(()) + numeric_op!(vm, [a, b], 2, a.min(b)) } opcode OpMax<0xa4, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(max(a,b)); - Ok(()) + numeric_op!(vm, [a, b], 2, a.max(b)) } opcode OpWithin<0xa5, 1>(self, vm) { - let [x,l,u]: [i64; 3] = vm.dstack.pop_items()?; - vm.dstack.push_item((x >= l && x < u) as i64); - Ok(()) + numeric_op!(vm, [x,l,u], 3, (x >= l && x < u) as i64) } // Undefined opcodes. @@ -729,7 +721,7 @@ opcode_list! { let hash_type = SigHashType::from_u8(typ).map_err(|e| TxScriptError::InvalidSigHashType(typ))?; match vm.check_ecdsa_signature(hash_type, key.as_slice(), sig.as_slice()) { Ok(valid) => { - vm.dstack.push_item(valid); + vm.dstack.push_item(valid)?; Ok(()) }, Err(e) => { @@ -738,7 +730,7 @@ opcode_list! { } } None => { - vm.dstack.push_item(false); + vm.dstack.push_item(false)?; Ok(()) } } @@ -752,7 +744,7 @@ opcode_list! { let hash_type = SigHashType::from_u8(typ).map_err(|e| TxScriptError::InvalidSigHashType(typ))?; match vm.check_schnorr_signature(hash_type, key.as_slice(), sig.as_slice()) { Ok(valid) => { - vm.dstack.push_item(valid); + vm.dstack.push_item(valid)?; Ok(()) }, Err(e) => { @@ -761,7 +753,7 @@ opcode_list! { } } None => { - vm.dstack.push_item(false); + vm.dstack.push_item(false)?; Ok(()) } } @@ -884,25 +876,125 @@ opcode_list! 
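The hunk below replaces a run of OpUnknown opcodes with the KIP-10 introspection opcodes. Every one of them follows the same gating pattern, sketched here with hypothetical stand-in types: with the flag off, the byte reports InvalidOpcode exactly as the OpUnknown placeholder it replaces did, so pre-activation behavior is unchanged; with the flag on, it reads data out of the engine's ScriptSource.

enum ScriptError {
    InvalidOpcode(String),
}

struct Ctx<'a> {
    kip10_enabled: bool,
    input_amounts: &'a [u64], // stand-in for the ScriptSource::TxInput data
}

fn op_tx_input_count(ctx: &Ctx) -> Result<i64, ScriptError> {
    if !ctx.kip10_enabled {
        // Pre-activation this byte must stay indistinguishable from OpUnknown179.
        return Err(ScriptError::InvalidOpcode("OpTxInputCount".to_string()));
    }
    Ok(ctx.input_amounts.len() as i64)
}

fn main() {
    let enabled = Ctx { kip10_enabled: true, input_amounts: &[1111, 2222] };
    assert!(matches!(op_tx_input_count(&enabled), Ok(2)));
    let disabled = Ctx { kip10_enabled: false, ..enabled };
    assert!(matches!(op_tx_input_count(&disabled), Err(ScriptError::InvalidOpcode(_))));
}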
{ } } - // Undefined opcodes. - opcode OpUnknown178<0xb2, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown179<0xb3, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown180<0xb4, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown181<0xb5, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown182<0xb6, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown183<0xb7, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown184<0xb8, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown185<0xb9, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown186<0xba, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown187<0xbb, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown188<0xbc, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown189<0xbd, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown190<0xbe, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown191<0xbf, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown192<0xc0, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown193<0xc1, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown194<0xc2, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown195<0xc3, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + // Introspection opcodes + // Transaction level opcodes (following Transaction struct field order) + opcode OpTxVersion<0xb2, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputCount<0xb3, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + push_number(tx.inputs().len() as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputCount only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxOutputCount<0xb4, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + push_number(tx.outputs().len() as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputCount only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxLockTime<0xb5, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxSubnetId<0xb6, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxGas<0xb7, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxPayload<0xb8, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + // Input related opcodes (following TransactionInput struct field order) + opcode OpTxInputIndex<0xb9, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{idx, ..} => { + push_number(idx as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputIndex only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpOutpointTxId<0xba, 1>(self, vm) 
Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpOutpointIndex<0xbb, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputScriptSig<0xbc, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputSeq<0xbd, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + // UTXO related opcodes (following UtxoEntry struct field order) + opcode OpTxInputAmount<0xbe, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let utxo = usize::try_from(idx).ok() + .and_then(|idx| tx.utxo(idx)) + .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; + push_number(utxo.amount.try_into().map_err(|e: TryFromIntError| TxScriptError::NumberTooBig(e.to_string()))?, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputAmount only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxInputSpk<0xbf, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let utxo = usize::try_from(idx).ok() + .and_then(|idx| tx.utxo(idx)) + .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; + vm.dstack.push(utxo.script_public_key.to_bytes()); + Ok(()) + }, + _ => Err(TxScriptError::InvalidSource("OpInputSpk only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxInputBlockDaaScore<0xc0, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputIsCoinbase<0xc1, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + // Output related opcodes (following TransactionOutput struct field order) + opcode OpTxOutputAmount<0xc2, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let output = usize::try_from(idx).ok() + .and_then(|idx| tx.outputs().get(idx)) + .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.outputs().len()))?; + push_number(output.value.try_into().map_err(|e: TryFromIntError| TxScriptError::NumberTooBig(e.to_string()))?, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputAmount only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxOutputSpk<0xc3, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let output = usize::try_from(idx).ok() + .and_then(|idx| tx.outputs().get(idx)) + .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.outputs().len()))?; + vm.dstack.push(output.script_public_key.to_bytes()); + Ok(()) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputSpk only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + // Undefined opcodes opcode OpUnknown196<0xc4, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) opcode OpUnknown197<0xc5, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) opcode OpUnknown198<0xc6, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) @@ -1009,10 +1101,12 @@ mod test { let cache = Cache::new(10_000); let
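All four amount/SPK opcodes above share one bounds-check idiom: usize::try_from(idx).ok().and_then(lookup).ok_or_else(error). In isolation (hypothetical names, not the crate's API) it folds a negative index and an out-of-range index into the same error path, which is exactly what the InvalidInputIndex(-1, 2) and InvalidInputIndex(2, 2) cases in the error tests below rely on:

fn checked_lookup<T: Copy>(items: &[T], idx: i32) -> Result<T, String> {
    usize::try_from(idx)
        .ok() // a negative index fails the conversion...
        .and_then(|i| items.get(i).copied()) // ...an oversized one fails the lookup
        .ok_or_else(|| format!("invalid index {} for length {}", idx, items.len()))
}

fn main() {
    let amounts = [1111u64, 2222];
    assert_eq!(checked_lookup(&amounts, 0), Ok(1111));
    assert_eq!(checked_lookup(&amounts, 1), Ok(2222));
    assert!(checked_lookup(&amounts, -1).is_err());
    assert!(checked_lookup(&amounts, 2).is_err());
}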
reused_values = SigHashReusedValuesUnsync::new(); for TestCase { init, code, dstack } in tests { - let mut vm = TxScriptEngine::new(&reused_values, &cache); - vm.dstack = init; - code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); - assert_eq!(*vm.dstack, dstack, "OpCode {} Pushed wrong value", code.value()); + [false, true].into_iter().for_each(|kip10_enabled| { + let mut vm = TxScriptEngine::new(&reused_values, &cache, kip10_enabled); + vm.dstack = init.clone(); + code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); + assert_eq!(*vm.dstack, dstack, "OpCode {} Pushed wrong value", code.value()); + }); } } @@ -1020,16 +1114,18 @@ mod test { let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); for ErrorTestCase { init, code, error } in tests { - let mut vm = TxScriptEngine::new(&reused_values, &cache); - vm.dstack.clone_from(&init); - assert_eq!( - code.execute(&mut vm) - .expect_err(format!("Opcode {} should have errored (init: {:?})", code.value(), init.clone()).as_str()), - error, - "Opcode {} returned wrong error {:?}", - code.value(), - init - ); + [false, true].into_iter().for_each(|kip10_enabled| { + let mut vm = TxScriptEngine::new(&reused_values, &cache, kip10_enabled); + vm.dstack.clone_from(&init); + assert_eq!( + code.execute(&mut vm) + .expect_err(format!("Opcode {} should have errored (init: {:?})", code.value(), init.clone()).as_str()), + error, + "Opcode {} returned wrong error {:?}", + code.value(), + init + ); + }); } } @@ -1055,7 +1151,7 @@ mod test { let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); - let mut vm = TxScriptEngine::new(&reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache, false); for pop in tests { match pop.execute(&mut vm) { @@ -1074,11 +1170,22 @@ mod test { opcodes::OpVerNotIf::empty().expect("Should accept empty"), opcodes::OpReserved1::empty().expect("Should accept empty"), opcodes::OpReserved2::empty().expect("Should accept empty"), + opcodes::OpTxVersion::empty().expect("Should accept empty"), + opcodes::OpTxLockTime::empty().expect("Should accept empty"), + opcodes::OpTxSubnetId::empty().expect("Should accept empty"), + opcodes::OpTxGas::empty().expect("Should accept empty"), + opcodes::OpTxPayload::empty().expect("Should accept empty"), + opcodes::OpOutpointTxId::empty().expect("Should accept empty"), + opcodes::OpOutpointIndex::empty().expect("Should accept empty"), + opcodes::OpTxInputScriptSig::empty().expect("Should accept empty"), + opcodes::OpTxInputSeq::empty().expect("Should accept empty"), + opcodes::OpTxInputBlockDaaScore::empty().expect("Should accept empty"), + opcodes::OpTxInputIsCoinbase::empty().expect("Should accept empty"), ]; let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); - let mut vm = TxScriptEngine::new(&reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache, false); for pop in tests { match pop.execute(&mut vm) { @@ -1093,24 +1200,6 @@ mod test { let tests: Vec>> = vec![ opcodes::OpUnknown166::empty().expect("Should accept empty"), opcodes::OpUnknown167::empty().expect("Should accept empty"), - opcodes::OpUnknown178::empty().expect("Should accept empty"), - opcodes::OpUnknown179::empty().expect("Should accept empty"), - opcodes::OpUnknown180::empty().expect("Should accept empty"), - opcodes::OpUnknown181::empty().expect("Should accept empty"), - 
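The harness change above is mechanical but worth naming as a pattern: each opcode unit test now runs once per activation state, pinning legacy semantics while layering the KIP-10 semantics on top. A minimal sketch of the pattern (the closure body is a placeholder, not the crate's test code):

// Run a per-flag check under both activation states, mirroring the
// `[false, true].into_iter().for_each(...)` loops added above.
fn run_for_both_flags(mut check: impl FnMut(bool)) {
    [false, true].into_iter().for_each(|kip10_enabled| check(kip10_enabled));
}

fn main() {
    run_for_both_flags(|kip10_enabled| {
        // In the real tests this builds a TxScriptEngine with `kip10_enabled`
        // and asserts the per-flag expectation.
        println!("checking with kip10_enabled={kip10_enabled}");
    });
}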
opcodes::OpUnknown182::empty().expect("Should accept empty"), - opcodes::OpUnknown183::empty().expect("Should accept empty"), - opcodes::OpUnknown184::empty().expect("Should accept empty"), - opcodes::OpUnknown185::empty().expect("Should accept empty"), - opcodes::OpUnknown186::empty().expect("Should accept empty"), - opcodes::OpUnknown187::empty().expect("Should accept empty"), - opcodes::OpUnknown188::empty().expect("Should accept empty"), - opcodes::OpUnknown189::empty().expect("Should accept empty"), - opcodes::OpUnknown190::empty().expect("Should accept empty"), - opcodes::OpUnknown191::empty().expect("Should accept empty"), - opcodes::OpUnknown192::empty().expect("Should accept empty"), - opcodes::OpUnknown193::empty().expect("Should accept empty"), - opcodes::OpUnknown194::empty().expect("Should accept empty"), - opcodes::OpUnknown195::empty().expect("Should accept empty"), opcodes::OpUnknown196::empty().expect("Should accept empty"), opcodes::OpUnknown197::empty().expect("Should accept empty"), opcodes::OpUnknown198::empty().expect("Should accept empty"), @@ -1169,7 +1258,7 @@ mod test { let cache = Cache::new(10_000); let reused_values = SigHashReusedValuesUnsync::new(); - let mut vm = TxScriptEngine::new(&reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache, false); for pop in tests { match pop.execute(&mut vm) { @@ -2718,6 +2807,9 @@ mod test { fn populated_input(&self, _index: usize) -> (&TransactionInput, &UtxoEntry) { unimplemented!() } + fn utxo(&self, _index: usize) -> Option<&UtxoEntry> { + unimplemented!() + } } fn make_mock_transaction(lock_time: u64) -> (VerifiableTransactionMock, TransactionInput, UtxoEntry) { @@ -2761,8 +2853,7 @@ mod test { ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) - .expect("Shouldn't fail"); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache, false); vm.dstack = vec![lock_time.clone()]; match code.execute(&mut vm) { // Message is based on the should_fail values @@ -2804,8 +2895,7 @@ mod test { ] { let mut input = base_input.clone(); input.sequence = tx_sequence; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) - .expect("Shouldn't fail"); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache, false); vm.dstack = vec![sequence.clone()]; match code.execute(&mut vm) { // Message is based on the should_fail values @@ -2907,4 +2997,849 @@ mod test { TestCase { code: opcodes::OpIfDup::empty().expect("Should accept empty"), init: vec![vec![]], dstack: vec![vec![]] }, ]) } + + mod kip10 { + use super::*; + use crate::{ + data_stack::{DataStack, OpcodeData}, + opcodes::{codes::*, push_number}, + pay_to_script_hash_script, + script_builder::ScriptBuilder, + SpkEncoding, + }; + use kaspa_consensus_core::tx::MutableTransaction; + + #[derive(Clone, Debug)] + struct Kip10Mock { + spk: ScriptPublicKey, + amount: u64, + } + + fn create_mock_spk(value: u8) -> ScriptPublicKey { + let pub_key = vec![value; 32]; + let addr = Address::new(Prefix::Testnet, Version::PubKey, &pub_key); + pay_to_address_script(&addr) + } + + fn kip_10_tx_mock(inputs: Vec<Kip10Mock>, outputs: Vec<Kip10Mock>) -> (Transaction, Vec<UtxoEntry>) { + let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let dummy_sig_script = vec![0u8; 65]; + let (utxos,
tx_inputs) = inputs + .into_iter() + .map(|Kip10Mock { spk, amount }| { + (UtxoEntry::new(amount, spk, 0, false), TransactionInput::new(dummy_prev_out, dummy_sig_script.clone(), 10, 0)) + }) + .unzip(); + + let tx_out = outputs.into_iter().map(|Kip10Mock { spk, amount }| TransactionOutput::new(amount, spk)); + + let tx = Transaction::new(TX_VERSION + 1, tx_inputs, tx_out.collect(), 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + (tx, utxos) + } + + #[derive(Debug)] + struct TestGroup { + name: &'static str, + kip10_enabled: bool, + test_cases: Vec<TestCase>, + } + + #[derive(Debug)] + enum Operation { + InputSpk, + OutputSpk, + InputAmount, + OutputAmount, + } + + #[derive(Debug)] + enum TestCase { + Successful { operation: Operation, index: i64, expected_result: ExpectedResult }, + Incorrect { operation: Operation, index: Option<i64>, expected_error: TxScriptError }, + } + + #[derive(Debug)] + struct ExpectedResult { + expected_spk: Option<Vec<u8>>, + expected_amount: Option<Vec<u8>>, + } + + fn execute_test_group(group: &TestGroup) { + let input_spk1 = create_mock_spk(1); + let input_spk2 = create_mock_spk(2); + let output_spk1 = create_mock_spk(3); + let output_spk2 = create_mock_spk(4); + + let inputs = + vec![Kip10Mock { spk: input_spk1.clone(), amount: 1111 }, Kip10Mock { spk: input_spk2.clone(), amount: 2222 }]; + let outputs = + vec![Kip10Mock { spk: output_spk1.clone(), amount: 3333 }, Kip10Mock { spk: output_spk2.clone(), amount: 4444 }]; + + let (tx, utxo_entries) = kip_10_tx_mock(inputs, outputs); + let tx = PopulatedTransaction::new(&tx, utxo_entries); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + for current_idx in 0..tx.inputs().len() { + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[current_idx], + current_idx, + tx.utxo(current_idx).unwrap(), + &reused_values, + &sig_cache, + group.kip10_enabled, + ); + + // Check input index opcode first + let op_input_idx = opcodes::OpTxInputIndex::empty().expect("Should accept empty"); + if !group.kip10_enabled { + assert!(matches!(op_input_idx.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_)))); + } else { + let mut expected = vm.dstack.clone(); + expected.push_item(current_idx as i64).unwrap(); + op_input_idx.execute(&mut vm).unwrap(); + assert_eq!(vm.dstack, expected); + vm.dstack.clear(); + } + + // Prepare opcodes + let op_input_spk = opcodes::OpTxInputSpk::empty().expect("Should accept empty"); + let op_output_spk = opcodes::OpTxOutputSpk::empty().expect("Should accept empty"); + let op_input_amount = opcodes::OpTxInputAmount::empty().expect("Should accept empty"); + let op_output_amount = opcodes::OpTxOutputAmount::empty().expect("Should accept empty"); + + // Execute each test case + for test_case in &group.test_cases { + match test_case { + TestCase::Successful { operation, index, expected_result } => { + push_number(*index, &mut vm).unwrap(); + let result = match operation { + Operation::InputSpk => op_input_spk.execute(&mut vm), + Operation::OutputSpk => op_output_spk.execute(&mut vm), + Operation::InputAmount => op_input_amount.execute(&mut vm), + Operation::OutputAmount => op_output_amount.execute(&mut vm), + }; + assert!(result.is_ok()); + + // Check the result matches expectations + if let Some(ref expected_spk) = expected_result.expected_spk { + assert_eq!(vm.dstack, vec![expected_spk.clone()]); + } + if let Some(ref expected_amount) = expected_result.expected_amount { + assert_eq!(vm.dstack, vec![expected_amount.clone()]); + } + vm.dstack.clear(); + } + TestCase::Incorrect {
operation, index, expected_error } => { + if let Some(idx) = index { + push_number(*idx, &mut vm).unwrap(); + } + + let result = match operation { + Operation::InputSpk => op_input_spk.execute(&mut vm), + Operation::OutputSpk => op_output_spk.execute(&mut vm), + Operation::InputAmount => op_input_amount.execute(&mut vm), + Operation::OutputAmount => op_output_amount.execute(&mut vm), + }; + + assert!( + matches!(result, Err(ref e) if std::mem::discriminant(e) == std::mem::discriminant(expected_error)) + ); + vm.dstack.clear(); + } + } + } + } + } + + #[test] + fn test_unary_introspection_ops() { + let test_groups = vec![ + TestGroup { + name: "KIP-10 disabled", + kip10_enabled: false, + test_cases: vec![ + TestCase::Incorrect { + operation: Operation::InputSpk, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + TestCase::Incorrect { + operation: Operation::OutputSpk, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + TestCase::Incorrect { + operation: Operation::InputAmount, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + ], + }, + TestGroup { + name: "Valid input indices", + kip10_enabled: true, + test_cases: vec![ + TestCase::Successful { + operation: Operation::InputSpk, + index: 0, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(1).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::InputSpk, + index: 1, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(2).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::InputAmount, + index: 0, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::<i64>::serialize(&1111).unwrap()), + }, + }, + TestCase::Successful { + operation: Operation::InputAmount, + index: 1, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::<i64>::serialize(&2222).unwrap()), + }, + }, + ], + }, + TestGroup { + name: "Valid output indices", + kip10_enabled: true, + test_cases: vec![ + TestCase::Successful { + operation: Operation::OutputSpk, + index: 0, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(3).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::OutputSpk, + index: 1, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(4).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::OutputAmount, + index: 0, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::<i64>::serialize(&3333).unwrap()), + }, + }, + TestCase::Successful { + operation: Operation::OutputAmount, + index: 1, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::<i64>::serialize(&4444).unwrap()), + }, + }, + ], + }, + TestGroup { + name: "Error cases", + kip10_enabled: true, + test_cases: vec![ + TestCase::Incorrect { + operation: Operation::InputAmount, + index: None, + expected_error: TxScriptError::InvalidStackOperation(1, 0), + }, + TestCase::Incorrect { + operation: Operation::InputAmount, + index: Some(-1), + expected_error: TxScriptError::InvalidInputIndex(-1, 2), + }, + 
TestCase::Incorrect { + operation: Operation::InputAmount, + index: Some(2), + expected_error: TxScriptError::InvalidInputIndex(2, 2), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: None, + expected_error: TxScriptError::InvalidStackOperation(1, 0), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: Some(-1), + expected_error: TxScriptError::InvalidOutputIndex(-1, 2), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: Some(2), + expected_error: TxScriptError::InvalidOutputIndex(2, 2), + }, + ], + }, + ]; + + for group in test_groups { + println!("Running test group: {}", group.name); + execute_test_group(&group); + } + } + fn create_mock_tx(input_count: usize, output_count: usize) -> (Transaction, Vec<UtxoEntry>) { + let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let dummy_sig_script = vec![0u8; 65]; + + // Create inputs with different SPKs and amounts + let inputs: Vec<Kip10Mock> = + (0..input_count).map(|i| Kip10Mock { spk: create_mock_spk(i as u8), amount: 1000 + i as u64 }).collect(); + + // Create outputs with different SPKs and amounts + let outputs: Vec<Kip10Mock> = + (0..output_count).map(|i| Kip10Mock { spk: create_mock_spk((100 + i) as u8), amount: 2000 + i as u64 }).collect(); + + let (utxos, tx_inputs): (Vec<_>, Vec<_>) = inputs + .into_iter() + .map(|Kip10Mock { spk, amount }| { + (UtxoEntry::new(amount, spk, 0, false), TransactionInput::new(dummy_prev_out, dummy_sig_script.clone(), 10, 0)) + }) + .unzip(); + + let tx_outputs: Vec<_> = + outputs.into_iter().map(|Kip10Mock { spk, amount }| TransactionOutput::new(amount, spk)).collect(); + + let tx = Transaction::new(TX_VERSION + 1, tx_inputs, tx_outputs, 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + + (tx, utxos) + } + + #[test] + fn test_op_input_output_count() { + // Test cases with different input/output combinations + let test_cases = vec![ + (1, 0), // Minimum inputs, no outputs + (1, 1), // Minimum inputs, one output + (1, 2), // Minimum inputs, multiple outputs + (2, 1), // Multiple inputs, one output + (3, 2), // Multiple inputs, multiple outputs + (5, 3), // More inputs than outputs + (2, 4), // More outputs than inputs + ]; + + for (input_count, output_count) in test_cases { + let (tx, utxo_entries) = create_mock_tx(input_count, output_count); + let tx = PopulatedTransaction::new(&tx, utxo_entries); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Test with KIP-10 enabled and disabled + for kip10_enabled in [true, false] { + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], // Use first input + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + kip10_enabled, + ); + + let op_input_count = opcodes::OpTxInputCount::empty().expect("Should accept empty"); + let op_output_count = opcodes::OpTxOutputCount::empty().expect("Should accept empty"); + + if kip10_enabled { + // Test input count + op_input_count.execute(&mut vm).unwrap(); + assert_eq!( + vm.dstack, + vec![<Vec<u8> as OpcodeData<i64>>::serialize(&(input_count as i64)).unwrap()], + "Input count mismatch for {} inputs", + input_count + ); + vm.dstack.clear(); + + // Test output count + op_output_count.execute(&mut vm).unwrap(); + assert_eq!( + vm.dstack, + vec![<Vec<u8> as OpcodeData<i64>>::serialize(&(output_count as i64)).unwrap()], + "Output count mismatch for {} outputs", + output_count + ); + vm.dstack.clear(); + } else { + // Test that operations fail when KIP-10 is disabled + assert!( +
matches!(op_input_count.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_))), + "OpInputCount should fail when KIP-10 is disabled" + ); + assert!( + matches!(op_output_count.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_))), + "OpOutputCount should fail when KIP-10 is disabled" + ); + } + } + } + } + + #[test] + fn test_output_amount() { + // Create script: 0 OP_OUTPUTAMOUNT 100 EQUAL + let redeem_script = ScriptBuilder::new() + .add_op(Op0) + .unwrap() + .add_op(OpTxOutputAmount) + .unwrap() + .add_i64(100) + .unwrap() + .add_op(OpEqual) + .unwrap() + .drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + + // Create transaction with output amount 100 + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock.clone()], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + + // Set signature script to push redeem script + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Test success case + { + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure case with wrong amount + { + let output_mock = Kip10Mock { + spk: create_mock_spk(1), + amount: 99, // Wrong amount + }; + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock.clone()], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_input_amount() { + // Create script: 0 OP_INPUTAMOUNT 200 EQUAL + let redeem_script = ScriptBuilder::new() + .add_op(Op0) + .unwrap() + .add_op(OpTxInputAmount) + .unwrap() + .add_i64(200) + .unwrap() + .add_op(OpEqual) + .unwrap() + .drain(); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + let spk = pay_to_script_hash_script(&redeem_script); + + // Test success case + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure case + { + let input_mock = Kip10Mock { + spk: spk.clone(), + amount: 199, // Wrong amount + }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + 
tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_input_spk_basic() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create script: 0 OP_INPUTSPK OpNop + // Just verify that OpInputSpk pushes something onto stack + let redeem_script = ScriptBuilder::new().add_ops(&[Op0, OpTxInputSpk, OpNop]).unwrap().drain(); + let spk = pay_to_script_hash_script(&redeem_script); + + let (tx, utxo_entries) = kip_10_tx_mock(vec![Kip10Mock { spk, amount: 100 }], vec![]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache, true); + + // OpInputSpk should push input's SPK onto stack, making it non-empty + assert_eq!(vm.execute(), Ok(())); + } + + #[test] + fn test_input_spk_different() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create script: 0 OP_INPUTSPK 1 OP_INPUTSPK OP_EQUAL OP_NOT + // Verifies that two different inputs have different SPKs + let redeem_script = ScriptBuilder::new().add_ops(&[Op0, OpTxInputSpk, Op1, OpTxInputSpk, OpEqual, OpNot]).unwrap().drain(); + let spk = pay_to_script_hash_script(&redeem_script); + let input_mock1 = Kip10Mock { spk, amount: 100 }; + let input_mock2 = Kip10Mock { spk: create_mock_spk(2), amount: 100 }; // Different SPK + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock1, input_mock2], vec![]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache, true); + + // Should succeed because the SPKs are different + assert_eq!(vm.execute(), Ok(())); + } + + #[test] + fn test_input_spk_same() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create script: 0 OP_INPUTSPK 1 OP_INPUTSPK OP_EQUAL + // Verifies that two inputs with same SPK are equal + let redeem_script = ScriptBuilder::new().add_ops(&[Op0, OpTxInputSpk, Op1, OpTxInputSpk, OpEqual]).unwrap().drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + let input_mock1 = Kip10Mock { spk: spk.clone(), amount: 100 }; + let input_mock2 = Kip10Mock { spk, amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock1, input_mock2], vec![]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache, true); + + // Should succeed because both SPKs are identical + assert_eq!(vm.execute(), Ok(())); + } + + #[test] + fn test_output_spk() { + // Create unique SPK to check + let expected_spk = 
create_mock_spk(42); + let expected_spk_bytes = expected_spk.to_bytes(); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + // Create script: 0 OP_OUTPUTSPK EQUAL + let redeem_script = ScriptBuilder::new() + .add_op(Op0) + .unwrap() + .add_op(OpTxOutputSpk) + .unwrap() + .add_data(&expected_spk_bytes) + .unwrap() + .add_op(OpEqual) + .unwrap() + .drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + + // Test success case + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: expected_spk.clone(), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure case + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { + spk: create_mock_spk(43), // Different SPK + amount: 100, + }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_input_index() { + // Create script: OP_INPUTINDEX 0 EQUAL + let redeem_script = + ScriptBuilder::new().add_op(OpTxInputIndex).unwrap().add_i64(0).unwrap().add_op(OpEqual).unwrap().drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + // Test first input (success case) + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test second input (failure case) + { + let input_mock1 = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + let input_mock2 = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(2), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock1, input_mock2], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[1], + 1, + tx.utxo(1).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + // Should fail because 
script expects index 0 but we're at index 1 + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_counts() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + // Test OpInputCount: "OP_INPUTCOUNT 2 EQUAL" + let input_count_script = + ScriptBuilder::new().add_op(OpTxInputCount).unwrap().add_i64(2).unwrap().add_op(OpEqual).unwrap().drain(); + + // Test OpOutputCount: "OP_OUTPUTCOUNT 3 EQUAL" + let output_count_script = + ScriptBuilder::new().add_op(OpTxOutputCount).unwrap().add_i64(3).unwrap().add_op(OpEqual).unwrap().drain(); + + let input_spk = pay_to_script_hash_script(&input_count_script); + let output_spk = pay_to_script_hash_script(&output_count_script); + + // Create transaction with 2 inputs and 3 outputs + let input_mock1 = Kip10Mock { spk: input_spk.clone(), amount: 100 }; + let input_mock2 = Kip10Mock { spk: output_spk.clone(), amount: 200 }; + let output_mock1 = Kip10Mock { spk: create_mock_spk(1), amount: 50 }; + let output_mock2 = Kip10Mock { spk: create_mock_spk(2), amount: 100 }; + let output_mock3 = Kip10Mock { spk: create_mock_spk(3), amount: 150 }; + + let (tx, utxo_entries) = + kip_10_tx_mock(vec![input_mock1.clone(), input_mock2.clone()], vec![output_mock1, output_mock2, output_mock3]); + + // Test InputCount + { + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&input_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test OutputCount + { + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&output_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[1], + 1, + tx.utxo(1).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure cases with wrong counts + { + // Wrong input count script: "OP_INPUTCOUNT 3 EQUAL" + let wrong_input_count_script = + ScriptBuilder::new().add_op(OpTxInputCount).unwrap().add_i64(3).unwrap().add_op(OpEqual).unwrap().drain(); + + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&wrong_input_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + + { + // Wrong output count script: "OP_OUTPUTCOUNT 2 EQUAL" + let wrong_output_count_script = + ScriptBuilder::new().add_op(OpTxOutputCount).unwrap().add_i64(2).unwrap().add_op(OpEqual).unwrap().drain(); + + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&wrong_output_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[1], + 1, + tx.utxo(1).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + 
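The tests above exercise each introspection opcode in isolation; the motivating use case cited in the file-level comment (auto-compounding addresses) comes from composing them. The sketch below is illustrative and not part of the patch: it assembles, in the same ScriptBuilder style as the tests, a covenant redeem script requiring output 0 to pay at least input 0's amount back to a fixed script public key, and it assumes the same opcode semantics the tests establish (index pushed first, amounts compared as script numbers). A real spend would wrap it with pay_to_script_hash_script exactly as the tests do.

// Hypothetical helper, not in the patch: "output 0 must pay at least input 0's
// amount to `owner_spk_bytes`". Script, read left to right:
//   0 OpTxOutputSpk <owner_spk> OpEqualVerify   -- output 0 is owned by us
//   0 OpTxOutputAmount 0 OpTxInputAmount        -- push both amounts
//   OpGreaterThanOrEqual                        -- output amount >= input amount
fn compounding_covenant(owner_spk_bytes: &[u8]) -> ScriptBuilderResult<Vec<u8>> {
    Ok(ScriptBuilder::new()
        .add_op(Op0)?
        .add_op(OpTxOutputSpk)?
        .add_data(owner_spk_bytes)?
        .add_op(OpEqualVerify)?
        .add_op(Op0)?
        .add_op(OpTxOutputAmount)?
        .add_op(Op0)?
        .add_op(OpTxInputAmount)?
        .add_op(OpGreaterThanOrEqual)?
        .drain())
}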
} } diff --git a/crypto/txscript/src/script_builder.rs b/crypto/txscript/src/script_builder.rs index 731c47680e..466b8b4089 100644 --- a/crypto/txscript/src/script_builder.rs +++ b/crypto/txscript/src/script_builder.rs @@ -6,6 +6,7 @@ use crate::{ MAX_SCRIPTS_SIZE, MAX_SCRIPT_ELEMENT_SIZE, }; use hexplay::{HexView, HexViewBuilder}; +use kaspa_txscript_errors::SerializationError; use thiserror::Error; /// DEFAULT_SCRIPT_ALLOC is the default size used for the backing array @@ -31,6 +32,9 @@ pub enum ScriptBuilderError { #[error("adding integer {0} would exceed the maximum allowed canonical script length of {MAX_SCRIPTS_SIZE}")] IntegerRejected(i64), + + #[error(transparent)] + Serialization(#[from] SerializationError), } pub type ScriptBuilderResult<T> = std::result::Result<T, ScriptBuilderError>; @@ -228,7 +232,14 @@ impl ScriptBuilder { return Ok(self); } - let bytes: Vec<_> = OpcodeData::serialize(&val); + let bytes: Vec<_> = OpcodeData::<i64>::serialize(&val)?; + self.add_data(&bytes) + } + + // The bitcoind tests utilize this function + #[cfg(test)] + pub fn add_i64_min(&mut self) -> ScriptBuilderResult<&mut Self> { + let bytes: Vec<_> = OpcodeData::serialize(&crate::data_stack::SizedEncodeInt::<9>(i64::MIN)).expect("infallible"); self.add_data(&bytes) } @@ -355,6 +366,11 @@ mod tests { Test { name: "push -256", val: -256, expected: vec![OpData2, 0x00, 0x81] }, Test { name: "push -32767", val: -32767, expected: vec![OpData2, 0xff, 0xff] }, Test { name: "push -32768", val: -32768, expected: vec![OpData3, 0x00, 0x80, 0x80] }, + Test { + name: "push 9223372036854775807", + val: 9223372036854775807, + expected: vec![OpData8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F], + }, ]; for test in tests { @@ -362,6 +378,15 @@ mod tests { let result = builder.add_i64(test.val).expect("the script is canonical").script(); assert_eq!(result, test.expected, "{} wrong result", test.name); } + + // Special case used in the bitcoind tests + let mut builder = ScriptBuilder::new(); + let result = builder.add_i64_min().expect("the script is canonical").script(); + assert_eq!( + result, + vec![OpData9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80], + "push -9223372036854775808 wrong result" + ); } /// Tests that pushing data to a script via the ScriptBuilder API works as expected and conforms to BIP0062. diff --git a/crypto/txscript/src/standard/multisig.rs b/crypto/txscript/src/standard/multisig.rs index cbd9dbe6da..6c51fd361f 100644 --- a/crypto/txscript/src/standard/multisig.rs +++ b/crypto/txscript/src/standard/multisig.rs @@ -184,7 +184,7 @@ mod tests { let (input, entry) = tx.populated_inputs().next().unwrap(); let cache = Cache::new(10_000); - let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache).unwrap(); + let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache, false); assert_eq!(engine.execute().is_ok(), is_ok); } #[test] diff --git a/crypto/txscript/test-data/script_tests-kip10.json b/crypto/txscript/test-data/script_tests-kip10.json new file mode 100644 index 0000000000..947c8810de --- /dev/null +++ b/crypto/txscript/test-data/script_tests-kip10.json @@ -0,0 +1,5397 @@ +[ + [ + "Format is: [[wit..., amount]?, scriptSig, scriptPubKey, flags, expected_scripterror, ...
comments]" + ], + [ + "It is evaluated as if there was a crediting coinbase transaction with two 0" + ], + [ + "pushes as scriptSig, and one output of 0 satoshi and given scriptPubKey," + ], + [ + "followed by a spending transaction which spends this output as only input (and" + ], + [ + "correct prevout hash), using the given scriptSig. All nLockTimes are 0, all" + ], + [ + "nSequences are max." + ], + [ + "", + "DEPTH 0 EQUAL", + "", + "OK", + "Test the test: we should have an empty stack after scriptSig evaluation" + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK", + "and multiple spaces should not change that." + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK" + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK", + "Similarly whitespace around and between symbols" + ], + [ + "1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + " 1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "1 2 ", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + " 1 2 ", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "1", + "", + "", + "OK" + ], + [ + "0x02 0x01 0x00", + "", + "", + "OK", + "all bytes are significant, not only the last one" + ], + [ + "0x09 0x00000000 0x00000000 0x10", + "", + "", + "OK", + "equals zero when cast to Int64" + ], + [ + "0x01 0x11", + "17 EQUAL", + "", + "OK", + "push 1 byte" + ], + [ + "0x02 0x417a", + "'Az' EQUAL", + "", + "OK" + ], + [ + "0x4b 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "push 75 bytes" + ], + [ + "0x4c 0x4c 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "0x4c is OP_PUSHDATA1 (push 76 bytes)" + ], + [ + "0x4d 0x0001 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "0x4d is OP_PUSHDATA2" + ], + [ + "0x4f 1000", + "ADD 999 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 0x50 ENDIF 1", + "", + "OK", + "0x50 is reserved (ok if not executed)" + ], + [ + "0x51", + "0x5f ADD 0x60 EQUAL", + "", + "OK", + "0x51 through 0x60 push 1 through 16 onto stack" + ], + [ + "1", + "NOP", + "", + "OK" + ], + [ + "0", + "IF VER ELSE 1 ENDIF", + "", + "OK", + "VER non-functional (ok if not executed)" + ], + [ + "0", + "IF RESERVED RESERVED1 RESERVED2 ELSE 1 ENDIF", + "", + "OK", + "RESERVED ok in un-executed IF" + ], + [ + "1", + "DUP IF ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ENDIF", + "", + "OK" + ], + [ + "1", + "DUP IF ELSE ENDIF", + "", + "OK" + ], + [ 
+ "1", + "IF 1 ELSE ENDIF", + "", + "OK" + ], + [ + "0", + "IF ELSE 1 ENDIF", + "", + "OK" + ], + [ + "1 1", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 1", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0 0", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 1", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0 1", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0 ELSE 1 ELSE 0 ENDIF", + "", + "OK", + "Multiple ELSE's are valid and executed inverts on each ELSE encountered" + ], + [ + "1", + "IF 1 ELSE 0 ELSE ENDIF", + "", + "OK" + ], + [ + "1", + "IF ELSE 0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "'' 1", + "IF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", + "", + "OK" + ], + [ + "1", + "NOTIF 0 ELSE 1 ELSE 0 ENDIF", + "", + "OK", + "Multiple ELSE's are valid and execution inverts on each ELSE encountered" + ], + [ + "0", + "NOTIF 1 ELSE 0 ELSE ENDIF", + "", + "OK" + ], + [ + "0", + "NOTIF ELSE 0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "NOTIF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "'' 0", + "NOTIF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 1 IF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 1 IF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", + "", + "OK", + "Nested ELSE ELSE" + ], + [ + "1", + "NOTIF 0 NOTIF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 0 NOTIF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "0", + "IF RETURN ENDIF 1", + "", + "OK", + "RETURN only works if executed" + ], + [ + "1 1", + "VERIFY", + "", + "OK" + ], + [ + "1 0x05 0x01 0x00 0x00 0x00 0x00", + "VERIFY", + "", + "OK", + "values >4 bytes can be cast to boolean" + ], + [ + "0x01 0x80", + "VERIFY TRUE", + "", + "VERIFY", + "negative 0 is false" + ], + [ + "10 0 11", + "TOALTSTACK DROP FROMALTSTACK ADD 21 EQUAL", + "", + "OK" + ], + [ + "'gavin_was_here'", + "TOALTSTACK 11 FROMALTSTACK 'gavin_was_here' EQUALVERIFY 11 EQUAL", + "", + "OK" + ], + [ + "0", + "IFDUP DEPTH 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "1", + "IFDUP DEPTH 2 EQUALVERIFY 1 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "0x05 0x0100000000", + "IFDUP DEPTH 2 EQUALVERIFY 0x05 0x0100000000 EQUALVERIFY DROP TRUE", + "", + "OK", + "IFDUP dups non ints" + ], + [ + "0", + "DROP DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "0", + "DUP 1 ADD 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "0 1", + "NIP", + "", 
+ "OK" + ], + [ + "1 0", + "OVER DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "0 PICK 20 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "1 PICK 21 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "2 PICK 22 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "2 ROLL 22 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT 22 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT DROP 20 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT DROP DROP 21 EQUAL", + "", + "OK" + ], + [ + "22 21 20", + "ROT ROT 21 EQUAL 2DROP", + "", + "OK" + ], + [ + "22 21 20", + "ROT ROT ROT 20 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 24 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT DROP 25 EQUALVERIFY DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 20 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP DROP 21 EQUALVERIFY 2DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 2DROP 22 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 2DROP DROP 23 EQUALVERIFY TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2ROT 22 EQUALVERIFY 2DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2ROT 2ROT 20 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "SWAP 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "0 1", + "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", + "", + "OK" + ], + [ + "13 14", + "2DUP ROT EQUALVERIFY EQUAL", + "", + "OK" + ], + [ + "-1 0 1 2", + "3DUP DEPTH 7 EQUALVERIFY ADD ADD 3 EQUALVERIFY 2DROP 0 EQUALVERIFY", + "", + "OK" + ], + [ + "1 2 3 5", + "2OVER ADD ADD 8 EQUALVERIFY ADD ADD 6 EQUAL", + "", + "OK" + ], + [ + "1 3 5 7", + "2SWAP ADD 4 EQUALVERIFY ADD 12 EQUAL", + "", + "OK" + ], + [ + "0", + "SIZE 0 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "1", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "127", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "128", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "32767", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "32768", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "8388607", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "8388608", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "2147483647", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "2147483648", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "549755813887", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "549755813888", + "SIZE 6 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "9223372036854775807", + "SIZE 8 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-1", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-127", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-128", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-32767", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-32768", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + 
"-8388607", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-8388608", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-2147483647", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-2147483648", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-549755813887", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-549755813888", + "SIZE 6 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-9223372036854775807", + "SIZE 8 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SIZE 26 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "42", + "SIZE 1 EQUALVERIFY 42 EQUAL", + "", + "OK", + "SIZE does not consume argument" + ], + [ + "2 -2", + "ADD 0 EQUAL", + "", + "OK" + ], + [ + "2147483647 -2147483647", + "ADD 0 EQUAL", + "", + "OK" + ], + [ + "-1 -1", + "ADD -2 EQUAL", + "", + "OK" + ], + [ + "0 0", + "EQUAL", + "", + "OK" + ], + [ + "1 1", + "ADD 2 EQUAL", + "", + "OK" + ], + [ + "1", + "1ADD 2 EQUAL", + "", + "OK" + ], + [ + "111", + "1SUB 110 EQUAL", + "", + "OK" + ], + [ + "111 1", + "ADD 12 SUB 100 EQUAL", + "", + "OK" + ], + [ + "0", + "ABS 0 EQUAL", + "", + "OK" + ], + [ + "16", + "ABS 16 EQUAL", + "", + "OK" + ], + [ + "-16", + "ABS -16 NEGATE EQUAL", + "", + "OK" + ], + [ + "0", + "NOT NOP", + "", + "OK" + ], + [ + "1", + "NOT 0 EQUAL", + "", + "OK" + ], + [ + "11", + "NOT 0 EQUAL", + "", + "OK" + ], + [ + "0", + "0NOTEQUAL 0 EQUAL", + "", + "OK" + ], + [ + "1", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "111", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "-111", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "1 1", + "BOOLAND NOP", + "", + "OK" + ], + [ + "1 0", + "BOOLAND NOT", + "", + "OK" + ], + [ + "0 1", + "BOOLAND NOT", + "", + "OK" + ], + [ + "0 0", + "BOOLAND NOT", + "", + "OK" + ], + [ + "16 17", + "BOOLAND NOP", + "", + "OK" + ], + [ + "1 1", + "BOOLOR NOP", + "", + "OK" + ], + [ + "1 0", + "BOOLOR NOP", + "", + "OK" + ], + [ + "0 1", + "BOOLOR NOP", + "", + "OK" + ], + [ + "0 0", + "BOOLOR NOT", + "", + "OK" + ], + [ + "16 17", + "BOOLOR NOP", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMEQUAL", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMEQUALVERIFY 1", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMNOTEQUAL NOT", + "", + "OK" + ], + [ + "111 10 1", + "ADD NUMNOTEQUAL", + "", + "OK" + ], + [ + "11 10", + "LESSTHAN NOT", + "", + "OK" + ], + [ + "4 4", + "LESSTHAN NOT", + "", + "OK" + ], + [ + "10 11", + "LESSTHAN", + "", + "OK" + ], + [ + "-11 11", + "LESSTHAN", + "", + "OK" + ], + [ + "-11 -10", + "LESSTHAN", + "", + "OK" + ], + [ + "11 10", + "GREATERTHAN", + "", + "OK" + ], + [ + "4 4", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "10 11", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "-11 11", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "-11 -10", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "11 10", + "LESSTHANOREQUAL NOT", + "", + "OK" + ], + [ + "4 4", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "10 11", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "-11 11", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "-11 -10", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "11 10", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "4 4", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "10 11", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "-11 11", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "-11 -10", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "1 0", + "MIN 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 1", + "MIN 0 NUMEQUAL", + "", + "OK" + ], + [ + "-1 0", + "MIN -1 NUMEQUAL", + "", + 
"OK" + ], + [ + "0 -2147483647", + "MIN -2147483647 NUMEQUAL", + "", + "OK" + ], + [ + "2147483647 0", + "MAX 2147483647 NUMEQUAL", + "", + "OK" + ], + [ + "0 100", + "MAX 100 NUMEQUAL", + "", + "OK" + ], + [ + "-100 0", + "MAX 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 -2147483647", + "MAX 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 0 1", + "WITHIN", + "", + "OK" + ], + [ + "1 0 1", + "WITHIN NOT", + "", + "OK" + ], + [ + "0 -2147483647 2147483647", + "WITHIN", + "", + "OK" + ], + [ + "-1 -100 100", + "WITHIN", + "", + "OK" + ], + [ + "11 -100 100", + "WITHIN", + "", + "OK" + ], + [ + "-2147483647 -100 100", + "WITHIN NOT", + "", + "OK" + ], + [ + "2147483647 -100 100", + "WITHIN NOT", + "", + "OK" + ], + [ + "2147483647 2147483647", + "SUB 0 EQUAL", + "", + "OK" + ], + [ + "2147483647", + "DUP ADD 4294967294 EQUAL", + "", + "OK", + ">32 bit EQUAL is valid" + ], + [ + "2147483647", + "NEGATE DUP ADD -4294967294 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "NOP BLAKE2B 0x20 0x0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8 EQUAL", + "", + "OK" + ], + [ + "'a'", + "BLAKE2B NOP 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "NOP BLAKE2B 0x20 0x117ad6b940f5e8292c007d9c7e7350cd33cf85b5887e8da71c7957830f536e7c EQUAL", + "", + "OK", + "The NOP is added so the script won't be interpreted as P2SH" + ], + [ + "'a'", + "NOP BLAKE2B 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 0xb2 ELSE 1 ENDIF", + "", + "OK", + "opcodes above OP_CHECKSEQUENCEVERIFY invalid if executed" + ], + [ + "0", + "IF 0xbd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xbe ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xbf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xca 
ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xce ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xda ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xde ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xea ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xeb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xec ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xed ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xee ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xef ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfa ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfe ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xff ELSE 1 ENDIF", + "", + "OK" + ], + [ + "", + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "", + "OK", + "520 byte push" + ], + [ + "1", + 
"0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "OK", + "201 opcodes executed. 0x61 is NOP" + ], + [ + "1 2 3 4 5", + "0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d75", + "", + "OK", + "244 stack size (0x6f is 3DUP, 0x6d is 2DROP, and 0x75 is DROP)" + ], + [ + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP DROP 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d 0x61616161", + "", + "OK", + "Max-size (10,000-byte), max-push(520 bytes), max-opcodes(201), max stack size(244 items). 0x6f is 3DUP, 0x61 is NOP, 0x6d is 2DROP" + ], + [ + "0", + "IF 0x5050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050 ENDIF 1", + "", + "OK", + ">201 opcodes, but RESERVED (0x50) doesn't count towards opcode limit." 
+ ], + [ + "", + "1", + "", + "OK" + ], + [ + "127", + "0x01 0x7F EQUAL", + "", + "OK" + ], + [ + "128", + "0x02 0x8000 EQUAL", + "", + "OK", + "Leave room for the sign bit" + ], + [ + "32767", + "0x02 0xFF7F EQUAL", + "", + "OK" + ], + [ + "32768", + "0x03 0x008000 EQUAL", + "", + "OK" + ], + [ + "8388607", + "0x03 0xFFFF7F EQUAL", + "", + "OK" + ], + [ + "8388608", + "0x04 0x00008000 EQUAL", + "", + "OK" + ], + [ + "2147483647", + "0x04 0xFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "2147483648", + "0x05 0x0000008000 EQUAL", + "", + "OK" + ], + [ + "549755813887", + "0x05 0xFFFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "549755813888", + "0x06 0xFFFFFFFF7F EQUALVERIFY 2DROP TRUE", + "", + "OK" + ], + [ + "9223372036854775807", + "0x08 0xFFFFFFFFFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "-2", + "0x01 0x82 EQUAL", + "", + "OK", + "Numbers are little-endian with the MSB being a sign bit" + ], + [ + "-127", + "0x01 0xFF EQUAL", + "", + "OK" + ], + [ + "-128", + "0x02 0x8080 EQUAL", + "", + "OK" + ], + [ + "-32767", + "0x02 0xFFFF EQUAL", + "", + "OK" + ], + [ + "-32768", + "0x03 0x008080 EQUAL", + "", + "OK" + ], + [ + "-8388607", + "0x03 0xFFFFFF EQUAL", + "", + "OK" + ], + [ + "-8388608", + "0x04 0x00008080 EQUAL", + "", + "OK" + ], + [ + "-2147483647", + "0x04 0xFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "-2147483648", + "0x05 0x0000008080 EQUAL", + "", + "OK" + ], + [ + "-4294967295", + "0x05 0xFFFFFFFF80 EQUAL", + "", + "OK" + ], + [ + "-549755813887", + "0x05 0xFFFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "-549755813888", + "0x06 0x000000008080 EQUAL", + "", + "OK" + ], + [ + "-9223372036854775807", + "0x08 0xFFFFFFFFFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "2147483647", + "1ADD 2147483648 EQUAL", + "", + "OK", + "We can do math on 4-byte integers, and compare 5-byte ones" + ], + [ + "2147483647", + "1ADD DROP 1", + "", + "OK" + ], + [ + "-2147483647", + "1ADD DROP 1", + "", + "OK" + ], + [ + "1", + "0x02 0x0100 EQUAL NOT", + "", + "OK", + "Not the same byte array..." + ], + [ + "0", + "0x01 0x80 EQUAL NOT", + "", + "OK" + ], + [ + "", + "NOP 1", + "", + "OK", + "The following tests check the if(stack.size() < N) tests in each opcode" + ], + [ + "1", + "IF 1 ENDIF", + "", + "OK", + "They are here to catch copy-and-paste errors" + ], + [ + "0", + "NOTIF 1 ENDIF", + "", + "OK", + "Most of them are duplicated elsewhere," + ], + [ + "1", + "VERIFY 1", + "", + "OK", + "but, hey, more is always better, right?" 
+ ], + [ + "0", + "TOALTSTACK 1", + "", + "OK" + ], + [ + "1", + "TOALTSTACK FROMALTSTACK", + "", + "OK" + ], + [ + "0 0", + "2DROP 1", + "", + "OK" + ], + [ + "0 1", + "2DUP VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 0 1", + "3DUP VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0", + "2OVER VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0 0 0", + "2ROT VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0", + "2SWAP VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1", + "IFDUP VERIFY", + "", + "OK" + ], + [ + "", + "DEPTH 1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0", + "DROP 1", + "", + "OK" + ], + [ + "1", + "DUP VERIFY", + "", + "OK" + ], + [ + "0 1", + "NIP", + "", + "OK" + ], + [ + "1 0", + "OVER VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0 0 0 3", + "PICK VERIFY DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "PICK VERIFY DROP TRUE", + "", + "OK" + ], + [ + "1 0 0 0 3", + "ROLL VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "ROLL", + "", + "OK" + ], + [ + "1 0 0", + "ROT VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "SWAP VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0 1", + "TUCK VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1", + "SIZE VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0 0", + "EQUAL", + "", + "OK" + ], + [ + "0 0", + "EQUALVERIFY 1", + "", + "OK" + ], + [ + "0 0 1", + "EQUAL EQUAL", + "", + "OK", + "OP_0 and bools must have identical byte representations" + ], + [ + "0", + "1ADD", + "", + "OK" + ], + [ + "2", + "1SUB", + "", + "OK" + ], + [ + "-1", + "NEGATE", + "", + "OK" + ], + [ + "-1", + "ABS", + "", + "OK" + ], + [ + "0", + "NOT", + "", + "OK" + ], + [ + "-1", + "0NOTEQUAL", + "", + "OK" + ], + [ + "1 0", + "ADD", + "", + "OK" + ], + [ + "1 0", + "SUB", + "", + "OK" + ], + [ + "-1 -1", + "BOOLAND", + "", + "OK" + ], + [ + "-1 0", + "BOOLOR", + "", + "OK" + ], + [ + "0 0", + "NUMEQUAL", + "", + "OK" + ], + [ + "5 4", + "NUMEQUAL FALSE EQUAL", + "", + "OK" + ], + [ + "0 0", + "NUMEQUALVERIFY 1", + "", + "OK" + ], + [ + "-1 0", + "NUMNOTEQUAL", + "", + "OK" + ], + [ + "-1 0", + "LESSTHAN", + "", + "OK" + ], + [ + "1 0", + "GREATERTHAN", + "", + "OK" + ], + [ + "0 0", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "0 0", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "-1 0", + "MIN", + "", + "OK" + ], + [ + "1 0", + "MAX", + "", + "OK" + ], + [ + "-1 -1 0", + "WITHIN", + "", + "OK" + ], + [ + "0", + "SHA256", + "", + "OK" + ], + [ + "0", + "BLAKE2B", + "", + "OK" + ], + [ + "", + "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "CHECKMULTISIG is allowed to have zero keys and/or sigs" + ], + [ + "", + "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Zero sigs means no sigs are checked" + ], + [ + "", + "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "CHECKMULTISIG is allowed to have zero keys and/or sigs" + ], + [ + "", + "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Zero sigs means no sigs are checked" + ], + [ + "", + "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 2 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Test from up to 20 pubkeys, all not checked" + ], + [ + "", + "0 'a' 'b' 'c' 3 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + 
], + [ + "", + "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 2 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 3 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 
CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "1", + "0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 
CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY", + "", + "OK", + "nOpCount is incremented by the number of keys evaluated in addition to the usual one op per op. In this case we have zero keys, so we can execute 201 CHECKMULTISIGS" + ], + [ + "", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DROP DROP DROP DROP DROP DROP DROP TRUE", + "", + "OK", + "Even though there are no signatures being checked nOpCount is incremented by the number of keys." 
+ ], + [ + "1", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", + "", + "OK" + ], + [ + "0x01 1", + "BLAKE2B 0x20 0xce57216285125006ec18197bd8184221cefa559bb0798410d99a5bba5b07cd1d EQUAL", + "", + "OK", + "Very basic P2SH" + ], + [ + "0x00", + "SIZE 0 EQUALVERIFY DROP TRUE", + "", + "OK", + "Basic OP_0 execution" + ], + [ + "Numeric pushes" + ], + [ + "-1", + "0x4f EQUAL", + "", + "OK", + "OP1_NEGATE pushes 0x81" + ], + [ + "1", + "0x51 EQUAL", + "", + "OK", + "OP_1 pushes 0x01" + ], + [ + "2", + "0x52 EQUAL", + "", + "OK", + "OP_2 pushes 0x02" + ], + [ + "3", + "0x53 EQUAL", + "", + "OK", + "OP_3 pushes 0x03" + ], + [ + "4", + "0x54 EQUAL", + "", + "OK", + "OP_4 pushes 0x04" + ], + [ + "5", + "0x55 EQUAL", + "", + "OK", + "OP_5 pushes 0x05" + ], + [ + "6", + "0x56 EQUAL", + "", + "OK", + "OP_6 pushes 0x06" + ], + [ + "7", + "0x57 EQUAL", + "", + "OK", + "OP_7 pushes 0x07" + ], + [ + "8", + "0x58 EQUAL", + "", + "OK", + "OP_8 pushes 0x08" + ], + [ + "9", + "0x59 EQUAL", + "", + "OK", + "OP_9 pushes 0x09" + ], + [ + "10", + "0x5a EQUAL", + "", + "OK", + "OP_10 pushes 0x0a" + ], + [ + "11", + "0x5b EQUAL", + "", + "OK", + "OP_11 pushes 0x0b" + ], + [ + "12", + "0x5c EQUAL", + "", + "OK", + "OP_12 pushes 0x0c" + ], + [ + "13", + "0x5d EQUAL", + "", + "OK", + "OP_13 pushes 0x0d" + ], + [ + "14", + "0x5e EQUAL", + "", + "OK", + "OP_14 pushes 0x0e" + ], + [ + "15", + "0x5f EQUAL", + "", + "OK", + "OP_15 pushes 0x0f" + ], + [ + "16", + "0x60 EQUAL", + "", + "OK", + "OP_16 pushes 0x10" + ], + [ + "Unevaluated non-minimal pushes are ignored" + ], + [ + "0", + "IF 0x4c 0x00 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA1 ignored" + ], + [ + "0", + "IF 0x4d 0x0000 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA2 ignored" + ], + [ + "0", + "IF 0x4c 0x00000000 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA4 ignored" + ], + [ + "0", + "IF 0x01 0x81 ENDIF 1 ", + "", + "OK", + "1NEGATE equiv" + ], + [ + "0", + "IF 0x01 0x01 ENDIF 1 ", + "", + "OK", + "OP_1 equiv" + ], + [ + "0", + "IF 0x01 0x02 ENDIF 1 ", + "", + "OK", + "OP_2 equiv" + ], + [ + "0", + "IF 0x01 0x03 ENDIF 1 ", + "", + "OK", + "OP_3 equiv" + ], + [ + "0", + "IF 0x01 0x04 ENDIF 1 ", + "", + "OK", + "OP_4 equiv" + ], + [ + "0", + "IF 0x01 0x05 ENDIF 1 ", + "", + "OK", + "OP_5 equiv" + ], + [ + "0", + "IF 0x01 0x06 ENDIF 1 ", + "", + "OK", + "OP_6 equiv" + ], + [ + "0", + "IF 0x01 0x07 ENDIF 1 ", + "", + "OK", + "OP_7 equiv" + ], + [ + "0", + "IF 0x01 0x08 ENDIF 1 ", + "", + "OK", + "OP_8 equiv" + ], + [ + "0", + "IF 0x01 0x09 ENDIF 1 ", + "", + "OK", + "OP_9 equiv" + ], + [ + "0", + "IF 0x01 0x0a ENDIF 1 
", + "", + "OK", + "OP_10 equiv" + ], + [ + "0", + "IF 0x01 0x0b ENDIF 1 ", + "", + "OK", + "OP_11 equiv" + ], + [ + "0", + "IF 0x01 0x0c ENDIF 1 ", + "", + "OK", + "OP_12 equiv" + ], + [ + "0", + "IF 0x01 0x0d ENDIF 1 ", + "", + "OK", + "OP_13 equiv" + ], + [ + "0", + "IF 0x01 0x0e ENDIF 1 ", + "", + "OK", + "OP_14 equiv" + ], + [ + "0", + "IF 0x01 0x0f ENDIF 1 ", + "", + "OK", + "OP_15 equiv" + ], + [ + "0", + "IF 0x01 0x10 ENDIF 1 ", + "", + "OK", + "OP_16 equiv" + ], + [ + "Numeric minimaldata rules are only applied when a stack item is numerically evaluated; the push itself is allowed" + ], + [ + "0x01 0x00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x01 0x80", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0180", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0100", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0200", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0300", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0400", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0500", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0600", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0700", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0800", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0900", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0a00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0b00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0c00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0d00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0e00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0f00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x1000", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "While not really correctly DER encoded, the empty signature is allowed" + ], + [ + "to provide a compact way to provide a delibrately invalid signature." + ], + [ + "0", + "0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 CHECKSIG NOT", + "", + "OK" + ], + [ + "0", + "1 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 1 CHECKMULTISIG NOT", + "", + "OK" + ], + [ + "TRUE DATA_8 0x0000000000000080", + "CHECKSEQUENCEVERIFY", + "", + "OK", + "CSV passes if stack top bit 1 << 63 is set" + ], + [ + "", + "DEPTH", + "", + "EVAL_FALSE", + "Test the test: we should have an empty stack after scriptSig evaluation" + ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE", + "and multiple spaces should not change that." 
+ ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP DEPTH", + "", + "EVAL_FALSE" + ], + [ + "0x4c01", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA1 with not enough bytes" + ], + [ + "0x4d0200ff", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA2 with not enough bytes" + ], + [ + "0x4e03000000ffff", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA4 with not enough bytes" + ], + [ + "1", + "IF 0x50 ENDIF 1", + "", + "BAD_OPCODE", + "0x50 is reserved" + ], + [ + "0x52", + "0x5f ADD 0x60 EQUAL", + "", + "EVAL_FALSE", + "0x51 through 0x60 push 1 through 16 onto stack" + ], + [ + "0", + "NOP", + "", + "EVAL_FALSE", + "" + ], + [ + "1", + "IF VER ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VER non-functional" + ], + [ + "0", + "IF VERIF ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VERIF illegal everywhere" + ], + [ + "0", + "IF ELSE 1 ELSE VERIF ENDIF", + "", + "BAD_OPCODE", + "VERIF illegal everywhere" + ], + [ + "0", + "IF VERNOTIF ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VERNOTIF illegal everywhere" + ], + [ + "0", + "IF ELSE 1 ELSE VERNOTIF ENDIF", + "", + "BAD_OPCODE", + "VERNOTIF illegal everywhere" + ], + [ + "0", + "DUP IF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "IF 1 ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "DUP IF ELSE ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "IF 1 ELSE ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "NOTIF ELSE 1 ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1 0", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1 1", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1", + "IF RETURN ELSE ELSE 1 ENDIF", + "", + "OP_RETURN", + "Multiple ELSEs" + ], + [ + "1", + "IF 1 ELSE ELSE RETURN ENDIF", + "", + "OP_RETURN" + ], + [ + "1", + "ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "Malformed IF/ELSE/ENDIF sequence" + ], + [ + "1", + "ELSE ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "ENDIF ELSE", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "ENDIF ELSE IF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ENDIF ELSE", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ENDIF ELSE ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ENDIF ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ELSE ENDIF ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "RETURN", + "", + "OP_RETURN" + ], + [ + "1", + "DUP IF RETURN ENDIF", + "", + "OP_RETURN" + ], + [ + "1", + "RETURN 'data'", + "", + "OP_RETURN", + "canonical prunable txout format" + ], + [ + "0", + "VERIFY 1", + "", + "VERIFY" + ], + [ + "1", + "VERIFY", + "", + "EVAL_FALSE" + ], + [ + "1", + "VERIFY 0", + "", + "EVAL_FALSE" + ], + [ + "", + "IFDUP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + 
"DROP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DUP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "DUP 1 ADD 2 EQUALVERIFY 0 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "", + "NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 0 NIP", + "", + "EVAL_FALSE" + ], + [ + "", + "OVER 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "PICK 19 EQUALVERIFY DEPTH 2 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "0 PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "-1 PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "0 PICK 20 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "1 PICK 21 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "2 PICK 22 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "", + "0 ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "-1 ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "2 ROLL 22 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "", + "ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 2 ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0 1", + "SWAP 1 EQUALVERIFY", + "", + "EQUALVERIFY" + ], + [ + "", + "TUCK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "TUCK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 0", + "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", + "", + "EVAL_FALSE" + ], + [ + "", + "2DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 2", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "2OVER 1 VERIFY DROP DROP DROP DROP TRUE", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2 3 2OVER 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "2SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2 3 2SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "'a' 'b'", + "CAT", + "", + "DISABLED_OPCODE", + "CAT disabled" + ], + [ + "'a' 'b' 0", + "IF CAT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "CAT disabled" + ], + [ + "'abc' 1 1", + "SUBSTR", + "", + "DISABLED_OPCODE", + "SUBSTR disabled" + ], + [ + "'abc' 1 1 0", + "IF SUBSTR ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "SUBSTR disabled" + ], + [ + "'abc' 2 0", + "IF LEFT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "LEFT disabled" + ], + [ + "'abc' 2 0", + "IF RIGHT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "RIGHT disabled" + ], + [ + "", + "SIZE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NOP", + "", + "EMPTY_STACK", + "Checks EMPTY_STACK error" + ], + [ + "'abc'", + "INVERT VERIFY TRUE", + "", + "DISABLED_OPCODE", + "INVERT disabled" + ], + [ + "1 2 0", + "IF AND ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "AND disabled" + ], + [ + "1 2 0", + "IF OR ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "OR disabled" + ], + [ + "1 2 0", + "IF XOR ELSE 1 ENDIF NOP", + "", + 
"DISABLED_OPCODE", + "XOR disabled" + ], + [ + "2 0", + "IF 2MUL ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "2MUL disabled" + ], + [ + "2 0", + "IF 2DIV ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "2DIV disabled" + ], + [ + "2 2 0", + "IF MUL ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "MUL disabled" + ], + [ + "2 2 0", + "IF DIV ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "DIV disabled" + ], + [ + "2 2 0", + "IF MOD ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "MOD disabled" + ], + [ + "2 2 0", + "IF LSHIFT ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "LSHIFT disabled" + ], + [ + "2 2 0", + "IF RSHIFT ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "RSHIFT disabled" + ], + [ + "", + "EQUAL NOT", + "", + "INVALID_STACK_OPERATION", + "EQUAL must error when there are no stack items" + ], + [ + "0", + "EQUAL NOT", + "", + "INVALID_STACK_OPERATION", + "EQUAL must error when there are not 2 stack items" + ], + [ + "0 1", + "EQUAL", + "", + "EVAL_FALSE" + ], + [ + "1 1", + "ADD 0 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "11 1", + "ADD 12 SUB 11 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "2147483648 0", + "ADD NOP", + "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "-2147483648 0", + "ADD NOP", + "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "-9223372036854775808 0", + "ADD NOP", + "", + "UNKNOWN_ERROR", + "" + ], + [ + "2147483647", + "DUP ADD 4294967294 NUMEQUAL", + "", + "OK", + "NUMEQUAL is in numeric range since kip10" + ], + [ + "'abcdef'", + "NOT 0 EQUAL", + "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "'abcdefghi'", + "NOT 0 EQUAL", + "", + "UNKNOWN_ERROR", + "NOT is an arithmetic operand" + ], + [ + "2", + "DUP MUL 4 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "DUP DIV 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "2MUL 4 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "2DIV 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "7 3", + "MOD 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2 2", + "LSHIFT 8 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2 1", + "RSHIFT 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "0x50", + "1", + "", + "BAD_OPCODE", + "opcode 0x50 is reserved" + ], + [ + "1", + "IF 0xb2 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxVersion is reserved" + ], + [ + "1", + "IF 0xb3 ELSE 1 ENDIF", + "", + "OK", + "OpTxInputCount is enabled since kip10" + ], + [ + "1", + "IF 0xb4 ELSE 1 ENDIF", + "", + "OK", + "OpTxOutputCount is enabled since kip10" + ], + [ + "1", + "IF 0xb5 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxLockTime is reserved" + ], + [ + "1", + "IF 0xb6 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxSubnetId is reserved" + ], + [ + "1", + "IF 0xb7 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxGas is reserved" + ], + [ + "1", + "IF 0xb8 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxPayload is reserved" + ], + [ + "1", + "IF 0xb9 0 NUMEQUAL ELSE 1 ENDIF", + "", + "OK", + "OpTxInputIndex is enabled since kip10" + ], + [ + "1", + "IF 0xba ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpOutpointTxId is reserved" + ], + [ + "1", + "IF 0xbb ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpOutpointOutputIdx is reserved" + ], + [ + "1", + "IF 0xbc ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputScriptSig is reserved" + ], + [ + "1", + "IF 0xbd ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputSeq is reserved" + ], + [ + "0 1", + "IF 0xbe 0 NUMEQUAL ELSE 1 ENDIF", + "", + "OK", + 
"OpTxInputAmount is enabled since kip10" + ], + [ + "0 1", + "IF 0xbf ELSE 1 ENDIF", + "", + "OK", + "OpTxInputSpk is enabled since kip10" + ], + [ + "1", + "IF 0xc0 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputBlockDaaScore is reserved" + ], + [ + "1", + "IF 0xc1 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputIsCoinbase is reserved" + ], + [ + "0 1", + "IF 0xc2 0 NUMEQUAL ELSE 1 ENDIF", + "", + "OK", + "OpTxOutputAmount is enabled since kip10" + ], + [ + "0 1", + "IF 0xc3 0x02 0x0000 EQUAL ELSE 1 ENDIF", + "", + "OK", + "OpTxOutputSpk is enabled since kip10" + ], + [ + "1", + "IF 0xc4 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "opcodes above OpTxOutputSpk invalid if executed" + ], + [ + "1", + "IF 0xc5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xca ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xce ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcf ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xda ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xde ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdf ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xea ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xeb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xec ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xed ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xee ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xef ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf2 ELSE 
1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfa ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfe ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xff ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "", + "PUSH_SIZE", + ">520 byte push" + ], + [ + "0", + "IF 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' ENDIF 1", + "", + "PUSH_SIZE", + ">520 byte push in non-executed IF branch" + ], + [ + "1", + "0x61616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "OP_COUNT", + ">201 opcodes executed. 0x61 is NOP" + ], + [ + "0", + "IF 0x6161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161 ENDIF 1", + "", + "OP_COUNT", + ">201 opcodes including non-executed IF branch. 
0x61 is NOP" + ], + [ + "", + "1 2 3 4 5 6 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", + "", + "STACK_SIZE", + ">244 stack size (0x6f is 3DUP)" + ], + [ + "", + "1 TOALTSTACK 2 TOALTSTACK 3 4 5 6 7 8 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", + "", + "STACK_SIZE", + ">244 stack+altstack size" + ], + [ + "", + "0 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP 0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "SCRIPT_SIZE", + "10,001-byte scriptPubKey" + ], + [ + "1", + "VER", + "", + "BAD_OPCODE", + "OP_VER is reserved" + ], + [ + "1", + "VERIF", + "", + "BAD_OPCODE", + "OP_VERIF is reserved" + ], + [ + "1", + "VERNOTIF", + "", + "BAD_OPCODE", + "OP_VERNOTIF is reserved" + ], + [ + "1", + "RESERVED", + "", + "BAD_OPCODE", + "OP_RESERVED is reserved" + ], + [ + "1", + "RESERVED1", + "", + "BAD_OPCODE", + "OP_RESERVED1 is reserved" + ], + [ + "1", + "RESERVED2", + "", + "BAD_OPCODE", + "OP_RESERVED2 is reserved" + ], + [ + "1", + "0xb2", + "", + "BAD_OPCODE", + "0xb2 == OP_CHECKSEQUENCEVERIFY + 1" + ], + [ + "2147483648", + "1ADD 2147483649 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648", + "NEGATE -2147483648 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "-2147483648", + "1ADD -2147483647 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483647", + "DUP 1ADD 1SUB NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648", + "1SUB 2147483647 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648 1", + "BOOLOR 1 EQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648 1", + "BOOLAND 1 EQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "-9223372036854775808", + "1ADD 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers" + ], + [ + "-9223372036854775808", + "NEGATE 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers" + ], + [ + "-9223372036854775808", + "1ADD 1", + "", + "UNKNOWN_ERROR", + "Because we use a sign bit, -9223372036854775808 is also 9 bytes" + ], + [ + "-9223372036854775808", + "1ADD 1SUB 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers, even if the result is 8-bytes" + ], + [ + "-9223372036854775808", + "1SUB 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers, even if the result is 8-bytes" + ], + 
[ + "-9223372036854775808 1", + "BOOLOR 1", + "", + "UNKNOWN_ERROR", + "We cannot do BOOLOR on 9-byte integers (but we can still do IF etc)" + ], + [ + "-9223372036854775808 1", + "BOOLAND 1", + "", + "UNKNOWN_ERROR", + "We cannot do BOOLAND on 9-byte integers" + ], + [ + "-9223372036854775807", + "1SUB", + "", + "UNKNOWN_ERROR", + "result of math operation can't exceed 8 bytes" + ], + [ + "1", + "1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "ENDIF without IF" + ], + [ + "1", + "IF 1", + "", + "UNBALANCED_CONDITIONAL", + "IF without ENDIF" + ], + [ + "", + "IF 1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "The following tests check the if(stack.size() < N) tests in each opcode" + ], + [ + "", + "NOTIF 1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "They are here to catch copy-and-paste errors" + ], + [ + "", + "VERIFY 1", + "", + "INVALID_STACK_OPERATION", + "Most of them are duplicated elsewhere," + ], + [ + "", + "TOALTSTACK 1", + "", + "INVALID_STACK_OPERATION", + "but, hey, more is always better, right?" + ], + [ + "1", + "FROMALTSTACK", + "", + "INVALID_ALTSTACK_OPERATION" + ], + [ + "1", + "2DROP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2DUP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "3DUP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1", + "2OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 1 1", + "2ROT", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1", + "2SWAP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "IFDUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DROP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 3", + "PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0", + "PICK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 3", + "ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0", + "ROLL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "ROT", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SWAP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "TUCK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SIZE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "EQUAL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "EQUALVERIFY 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1ADD 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1SUB 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NEGATE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "ABS 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NOT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "0NOTEQUAL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "ADD", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SUB", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "BOOLAND", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "BOOLOR", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMEQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMEQUALVERIFY 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMNOTEQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "LESSTHAN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "GREATERTHAN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "LESSTHANOREQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "GREATERTHANOREQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "MIN", + "", + 
"INVALID_STACK_OPERATION" + ], + [ + "1", + "MAX", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "WITHIN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "Increase CHECKSIG and CHECKMULTISIG negative test coverage" + ], + [ + "", + "CHECKSIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKSIG must error when there are no stack items" + ], + [ + "0", + "CHECKSIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKSIG must error when there are not 2 stack items" + ], + [ + "", + "CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are no stack items" + ], + [ + "", + "-1 CHECKMULTISIG NOT", + "", + "PUBKEY_COUNT", + "CHECKMULTISIG must error when the specified number of pubkeys is negative" + ], + [ + "", + "1 CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are not enough pubkeys on the stack" + ], + [ + "", + "-1 0 CHECKMULTISIG NOT", + "", + "SIG_COUNT", + "CHECKMULTISIG must error when the specified number of signatures is negative" + ], + [ + "", + "1 'pk1' 1 CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are not enough signatures on the stack" + ], + [ + "", + "0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 
0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", + "", + "OP_COUNT", + "202 CHECKMULTISIGS, fails due to 201 op limit" + ], + [ + "", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG", + "", + "OP_COUNT", + "Fails due to 201 script operation limit" + ], + [ + "1", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 
'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", + "", + "OP_COUNT", + "" + ], + [ + "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21", + "21 CHECKMULTISIG 1", + "", + "PUBKEY_COUNT", + "nPubKeys > 20" + ], + [ + "0 'sig' 1 0", + "CHECKMULTISIG 1", + "", + "SIG_COUNT", + "nSigs > nPubKeys" + ], + [ + "NOP 0x01 1", + "BLAKE2B 0x20 0xda1745e9b549bd0bfa1a569971c77eba30cd5a4b EQUAL", + "", + "SIG_PUSHONLY", + "Tests for Script.IsPushOnly()" + ], + [ + "0 0x01 0x50", + "BLAKE2B 0x20 0xece424a6bb6ddf4db592c0faed60685047a361b1 EQUAL", + "", + "BAD_OPCODE", + "OP_RESERVED in P2SH should fail" + ], + [ + "0 0x01", + "VER BLAKE2B 0x20 0x0f4d7845db968f2a81b530b6f3c1d6246d4c7e01 EQUAL", + "", + "BAD_OPCODE", + "OP_VER in P2SH should fail" + ], + [ + "0x00", + "'00' EQUAL", + "", + "EVAL_FALSE", + "Basic OP_0 execution" + ], + [ + "MINIMALDATA enforcement for PUSHDATAs" + ], + [ + "0x4c 0x00", + "DROP 1", + "", + "MINIMALDATA", + "Empty vector minimally represented by OP_0" + ], + [ + "0x01 0x81", + "DROP 1", + "", + "MINIMALDATA", + "-1 minimally represented by OP_1NEGATE" + ], + [ + "0x01 0x01", + "DROP 1", + "", + "MINIMALDATA", + "1 to 16 minimally represented by OP_1 to OP_16" + ], + [ + "0x01 0x02", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x03", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x04", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x05", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x06", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x07", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x08", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x09", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0a", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0b", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0c", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0d", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0e", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0f", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x10", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x4c 0x48 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA1 of 72 bytes minimally represented by direct push" + ], + [ + "0x4d 0xFF00 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA2 of 255 bytes minimally represented by PUSHDATA1" + ], + [ + "0x4e 0x00010000 
0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA4 of 256 bytes minimally represented by PUSHDATA2" + ], + [ + "MINIMALDATA enforcement for numeric arguments" + ], + [ + "0x01 0x00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x02 0x0000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x01 0x80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "0x80 (negative zero) numequals 0" + ], + [ + "0x02 0x0080", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x02 0x0500", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 5" + ], + [ + "0x03 0x050000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 5" + ], + [ + "0x02 0x0580", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals -5" + ], + [ + "0x03 0x050080", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals -5" + ], + [ + "0x03 0xff7f80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffff" + ], + [ + "0x03 0xff7f00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xff7f" + ], + [ + "0x04 0xffff7f80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffffff" + ], + [ + "0x04 0xffff7f00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffff7f" + ], + [ + "Test every numeric-accepting opcode for correct handling of the numeric minimal encoding rule" + ], + [ + "1 0x02 0x0000", + "PICK DROP", + "", + "UNKNOWN_ERROR" + ], + [ + "1 0x02 0x0000", + "ROLL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "1ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "1SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "NEGATE DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "ABS DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "0NOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "BOOLAND DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "BOOLAND DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "BOOLOR DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "BOOLOR DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 1", + "NUMEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMEQUALVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "NUMEQUALVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMNOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "NUMNOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "LESSTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "LESSTHAN DROP 1", + "", + 
"UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "GREATERTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "GREATERTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "LESSTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "LESSTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "GREATERTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "GREATERTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "MIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "MIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "MAX DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "MAX DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0 0", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000 0", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0 0x02 0x0000", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0 1", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "CHECKMULTISIGVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "CHECKMULTISIGVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "Check MINIMALIF" + ], + [ + "2", + "IF TRUE ELSE FALSE", + "", + "MINIMALIF" + ], + [ + "2", + "NOTIF TRUE ELSE FALSE", + "", + "MINIMALIF" + ], + [ + "Order of CHECKMULTISIG evaluation tests, inverted by swapping the order of" + ], + [ + "pubkeys/signatures so they fail due to the STRICTENC rules on validly encoded" + ], + [ + "signatures and pubkeys." + ], + [ + "0x41 0x833682d4f60cc916a22a2c263e658fa662c49badb1e2a8c6208987bf99b1abd740498371480069e7a7a6e7471bf78c27bd9a1fd04fb212a92017346250ac187b01 0x41 0xea4a8d20562a950f4695dc24804565482e9fa111704886179d0c348f2b8a15fe691a305cd599c59c131677146661d5b98cb935330989a85f33afc70d0a21add101", + "2 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 0 2 CHECKMULTISIG NOT", + "", + "PUBKEYFORMAT", + "2-of-2 CHECKMULTISIG NOT with the first pubkey invalid, and both signatures validly encoded." 
+ ], + [ + "CHECKSEQUENCEVERIFY tests" + ], + [ + "", + "CHECKSEQUENCEVERIFY", + "", + "INVALID_STACK_OPERATION", + "CSV automatically fails on a empty stack" + ], + [ + "0", + "CHECKSEQUENCEVERIFY", + "", + "UNSATISFIED_LOCKTIME", + "CSV fails if stack top bit 1 << 31 is set and the tx version < 2" + ], + [ + "4294967296", + "CHECKSEQUENCEVERIFY", + "", + "UNSATISFIED_LOCKTIME", + "CSV fails if stack top bit 1 << 31 is not set, and tx version < 2" + ], + [ + "NULLFAIL should cover all signatures and signatures only" + ], + [ + "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", + "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", + "", + "OK", + "BIP66 and NULLFAIL-compliant" + ], + [ + "0x09 0x300602010102010101 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", + "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", + "", + "NULLFAIL", + "BIP66-compliant but not NULLFAIL-compliant 4" + ], + [ + "The End" + ] +] diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 2256cd3b34..3db614dc41 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -27,7 +27,7 @@ use kaspa_consensus_core::api::{BlockValidationFutures, ConsensusApi}; use kaspa_consensus_core::block::Block; use kaspa_consensus_core::blockhash::new_unique; use kaspa_consensus_core::blockstatus::BlockStatus; -use kaspa_consensus_core::constants::{BLOCK_VERSION, STORAGE_MASS_PARAMETER}; +use kaspa_consensus_core::constants::{BLOCK_VERSION, SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER}; use kaspa_consensus_core::errors::block::{BlockProcessResult, RuleError}; use kaspa_consensus_core::header::Header; use kaspa_consensus_core::network::{NetworkId, NetworkType::Mainnet}; @@ -43,9 +43,13 @@ use kaspa_core::time::unix_now; use kaspa_database::utils::get_kaspa_tempdir; use kaspa_hashes::Hash; +use crate::common; use flate2::read::GzDecoder; use futures_util::future::try_join_all; use itertools::Itertools; +use kaspa_consensus_core::coinbase::MinerData; +use kaspa_consensus_core::merkle::calc_hash_merkle_root; +use kaspa_consensus_core::muhash::MuHashExtensions; use kaspa_core::core::Core; use kaspa_core::signals::Shutdown; use kaspa_core::task::runtime::AsyncRuntime; @@ -72,8 +76,6 @@ use std::{ str::{from_utf8, FromStr}, }; -use crate::common; - #[derive(Serialize, Deserialize, Debug)] struct JsonBlock { id: String, @@ -833,6 +835,7 @@ impl KaspadGoParams { max_block_mass: self.MaxBlockMass, storage_mass_parameter: STORAGE_MASS_PARAMETER, storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), deflationary_phase_daa_score: self.DeflationaryPhaseDaaScore, pre_deflationary_phase_base_subsidy: self.PreDeflationaryPhaseBaseSubsidy, coinbase_maturity: MAINNET_PARAMS.coinbase_maturity, @@ -1757,3 +1760,108 @@ async fn staging_consensus_test() { core.shutdown(); core.join(joins); } + +/// Tests the KIP-10 transaction introspection opcode activation by verifying that: +/// 1. Transactions using these opcodes are rejected before the activation DAA score +/// 2. 
+/// 2. The same transactions are accepted at and after the activation score +/// Uses the OpTxInputSpk opcode as an example +#[tokio::test] +async fn run_kip10_activation_test() { + use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; + use kaspa_txscript::opcodes::codes::{Op0, OpTxInputSpk}; + use kaspa_txscript::pay_to_script_hash_script; + use kaspa_txscript::script_builder::ScriptBuilder; + + // KIP-10 activates at DAA score 3 in this test + const KIP10_ACTIVATION_DAA_SCORE: u64 = 3; + + init_allocator_with_default_settings(); + + // Create P2SH script that attempts to use OpTxInputSpk - this will be our test subject + // The script should fail before KIP-10 activation and succeed after + let redeem_script = ScriptBuilder::new() + .add_op(Op0).unwrap() // Push 0 for input index + .add_op(OpTxInputSpk).unwrap() // Get the input's script pubkey + .drain(); + let spk = pay_to_script_hash_script(&redeem_script); + + // Set up initial UTXO with our test script + let initial_utxo_collection = [( + TransactionOutpoint::new(1.into(), 0), + UtxoEntry { amount: SOMPI_PER_KASPA, script_public_key: spk.clone(), block_daa_score: 0, is_coinbase: false }, + )]; + + // Initialize consensus with KIP-10 activation point + let config = ConfigBuilder::new(DEVNET_PARAMS) + .skip_proof_of_work() + .apply_args(|cfg| { + let mut genesis_multiset = MuHash::new(); + initial_utxo_collection.iter().for_each(|(outpoint, utxo)| { + genesis_multiset.add_utxo(outpoint, utxo); + }); + cfg.params.genesis.utxo_commitment = genesis_multiset.finalize(); + let genesis_header: Header = (&cfg.params.genesis).into(); + cfg.params.genesis.hash = genesis_header.hash; + }) + .edit_consensus_params(|p| { + p.kip10_activation = ForkActivation::new(KIP10_ACTIVATION_DAA_SCORE); + }) + .build(); + + let consensus = TestConsensus::new(&config); + let mut genesis_multiset = MuHash::new(); + consensus.append_imported_pruning_point_utxos(&initial_utxo_collection, &mut genesis_multiset); + consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap(); + consensus.init(); + + // Build blockchain up to one block before activation + let mut index = 0; + for _ in 0..KIP10_ACTIVATION_DAA_SCORE - 1 { + let parent = if index == 0 { config.genesis.hash } else { index.into() }; + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![parent], vec![]).await.unwrap(); + index += 1; + } + assert_eq!(consensus.get_virtual_daa_score(), index); + + // Create transaction that attempts to use the KIP-10 opcode + let mut spending_tx = Transaction::new( + 0, + vec![TransactionInput::new( + initial_utxo_collection[0].0, + ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(), + 0, + 0, + )], + vec![TransactionOutput::new(initial_utxo_collection[0].1.amount - 5000, spk)], + 0, + SUBNETWORK_ID_NATIVE, + 0, + vec![], + ); + spending_tx.finalize(); + let tx_id = spending_tx.id(); + // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it + { + let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); + + // First build block without transactions + let mut block = + consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); + + // Insert our test transaction and recalculate block hashes + block.transactions.push(spending_tx.clone()); + block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false);
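+ // The invalid script only fails UTXO validation, so consensus accepts the block's header and body but disqualifies it from the virtual chain (StatusDisqualifiedFromChain) rather than rejecting it outright.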
+ let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; + assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain))); + assert_eq!(consensus.lkg_virtual_state.load().daa_score, 2); + index += 1; + } + // Add one more block to reach the activation score + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); + index += 1; + + // Test 2: Verify the same transaction is accepted after activation + let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![spending_tx.clone()]).await; + assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); + assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); +} diff --git a/wallet/pskt/src/pskt.rs b/wallet/pskt/src/pskt.rs index 93c16ccc85..abae18f2dc 100644 --- a/wallet/pskt/src/pskt.rs +++ b/wallet/pskt/src/pskt.rs @@ -436,7 +436,7 @@ impl PSKT { let reused_values = SigHashReusedValuesUnsync::new(); tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| { - TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache)?.execute()?; + TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache, false).execute()?; <Result<(), TxScriptError>>::Ok(()) })?; } From 116dfb0f886f99bbe2a9f77b133e7649d6d6a833 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Wed, 13 Nov 2024 12:34:01 +0200 Subject: [PATCH 24/31] Some simplification to script number types (#594) * Some simplification to script number types * Add TODO * Address review comments --- crypto/txscript/src/data_stack.rs | 170 ++++++++++++-------------- crypto/txscript/src/lib.rs | 8 +- crypto/txscript/src/opcodes/mod.rs | 1 + crypto/txscript/src/script_builder.rs | 4 +- 4 files changed, 84 insertions(+), 99 deletions(-) diff --git a/crypto/txscript/src/data_stack.rs b/crypto/txscript/src/data_stack.rs index cb5935bbbd..898550fec2 100644 --- a/crypto/txscript/src/data_stack.rs +++ b/crypto/txscript/src/data_stack.rs @@ -3,14 +3,58 @@ use core::fmt::Debug; use core::iter; use kaspa_txscript_errors::SerializationError; use std::cmp::Ordering; +use std::num::TryFromIntError; use std::ops::Deref; -const DEFAULT_SCRIPT_NUM_LEN: usize = 4; -const DEFAULT_SCRIPT_NUM_LEN_KIP10: usize = 8; - -#[derive(PartialEq, Eq, Debug, Default)] +#[derive(PartialEq, Eq, Debug, Default, PartialOrd, Ord)] pub(crate) struct SizedEncodeInt<const LEN: usize>(pub(crate) i64);
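+// LEN caps the serialized byte length: deserializing an input longer than LEN bytes is rejected, which lets the legacy 4-byte and KIP-10 8-byte script-number rules share this single type.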
+impl<const LEN: usize> From<i64> for SizedEncodeInt<LEN> { + fn from(value: i64) -> Self { + SizedEncodeInt(value) + } +} + +impl<const LEN: usize> From<i32> for SizedEncodeInt<LEN> { + fn from(value: i32) -> Self { + SizedEncodeInt(value as i64) + } +} + +impl<const LEN: usize> TryFrom<SizedEncodeInt<LEN>> for i32 { + type Error = TryFromIntError; + + fn try_from(value: SizedEncodeInt<LEN>) -> Result<Self, Self::Error> { + value.0.try_into() + } +} + +impl<const LEN: usize> PartialEq<i64> for SizedEncodeInt<LEN> { + fn eq(&self, other: &i64) -> bool { + self.0 == *other + } +} + +impl<const LEN: usize> PartialOrd<i64> for SizedEncodeInt<LEN> { + fn partial_cmp(&self, other: &i64) -> Option<Ordering> { + self.0.partial_cmp(other) + } +} + +impl<const LEN: usize> Deref for SizedEncodeInt<LEN> { + type Target = i64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<const LEN: usize> From<SizedEncodeInt<LEN>> for i64 { + fn from(value: SizedEncodeInt<LEN>) -> Self { + value.0 + } +} + pub(crate) type Stack = Vec<Vec<u8>>; pub(crate) trait DataStack { @@ -111,94 +155,36 @@ fn deserialize_i64(v: &[u8]) -> Result<i64, TxScriptError> { } } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd)] -#[repr(transparent)] -pub struct Kip10I64(pub i64); - -impl From<Kip10I64> for i64 { - fn from(value: Kip10I64) -> Self { - value.0 - } -} - -impl PartialEq<i64> for Kip10I64 { - fn eq(&self, other: &i64) -> bool { - self.0.eq(other) - } -} - -impl PartialOrd<i64> for Kip10I64 { - fn partial_cmp(&self, other: &i64) -> Option<Ordering> { - self.0.partial_cmp(other) - } -} - -impl Deref for Kip10I64 { - type Target = i64; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl OpcodeData<Kip10I64> for Vec<u8> { - #[inline] - fn deserialize(&self) -> Result<Kip10I64, TxScriptError> { - match self.len() > DEFAULT_SCRIPT_NUM_LEN_KIP10 { - true => Err(TxScriptError::NumberTooBig(format!( - "numeric value encoded as {:x?} is {} bytes which exceeds the max allowed of {}", - self, - self.len(), - DEFAULT_SCRIPT_NUM_LEN_KIP10 - ))), - false => deserialize_i64(self).map(Kip10I64), - } - } - - #[inline] - fn serialize(from: &Kip10I64) -> Result<Vec<u8>, SerializationError> { - if from.0 == i64::MIN { - return Err(SerializationError::NumberTooLong(from.0)); - } - Ok(serialize_i64(&from.0)) - } -} +// TODO: Rename to DefaultSizedEncodeInt when KIP-10 is activated +pub type Kip10I64 = SizedEncodeInt<8>; impl OpcodeData<i64> for Vec<u8> { #[inline] fn deserialize(&self) -> Result<i64, TxScriptError> { - match self.len() > DEFAULT_SCRIPT_NUM_LEN { - true => Err(TxScriptError::NumberTooBig(format!( - "numeric value encoded as {:x?} is {} bytes which exceeds the max allowed of {}", - self, - self.len(), - DEFAULT_SCRIPT_NUM_LEN - ))), - false => deserialize_i64(self), - } + // TODO: Change LEN to 8 once KIP-10 is activated + OpcodeData::<SizedEncodeInt<4>>::deserialize(self).map(i64::from) } #[inline] fn serialize(from: &i64) -> Result<Vec<u8>, SerializationError> { - if from == &i64::MIN { - return Err(SerializationError::NumberTooLong(*from)); - } - Ok(serialize_i64(from)) + // Note that serialization and deserialization use different LEN. + // This is because prior to KIP-10, only deserialization size was limited. + // It's safe to use 8 here because i32 arithmetic operations (which were the + // only ones that were supported prior to KIP-10) can't get to i64::MIN + // (the only i64 value that requires more than 8 bytes to serialize). + OpcodeData::<SizedEncodeInt<8>>::serialize(&(*from).into()) } } impl OpcodeData<i32> for Vec<u8> { #[inline] fn deserialize(&self) -> Result<i32, TxScriptError> { - let res = OpcodeData::<i64>::deserialize(self)?; - // TODO: Consider getting rid of clamp, since the call to deserialize should return an error - // if the number is not in the i32 range (this should be done with proper testing)?
- Ok(res.clamp(i32::MIN as i64, i32::MAX as i64) as i32) + OpcodeData::<SizedEncodeInt<4>>::deserialize(self).map(|v| v.try_into().expect("number is within i32 range")) } #[inline] fn serialize(from: &i32) -> Result<Vec<u8>, SerializationError> { - Ok(OpcodeData::<i64>::serialize(&(*from as i64)).expect("should never happen")) + OpcodeData::<SizedEncodeInt<8>>::serialize(&(*from).into()) } } @@ -206,7 +192,7 @@ impl<const LEN: usize> OpcodeData<SizedEncodeInt<LEN>> for Vec<u8> { #[inline] fn deserialize(&self) -> Result<SizedEncodeInt<LEN>, TxScriptError> { match self.len() > LEN { - true => Err(TxScriptError::InvalidState(format!( + true => Err(TxScriptError::NumberTooBig(format!( "numeric value encoded as {:x?} is {} bytes which exceeds the max allowed of {}", self, self.len(), @@ -218,7 +204,11 @@ impl<const LEN: usize> OpcodeData<SizedEncodeInt<LEN>> for Vec<u8> { #[inline] fn serialize(from: &SizedEncodeInt<LEN>) -> Result<Vec<u8>, SerializationError> { - Ok(serialize_i64(&from.0)) + let bytes = serialize_i64(&from.0); + if bytes.len() > LEN { + return Err(SerializationError::NumberTooLong(from.0)); + } + Ok(bytes) } } @@ -614,59 +604,59 @@ mod tests {
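// Script numbers are little-endian with a sign bit in the high bit of the last byte; e.g. 0x0000008080 below decodes to -2147483648 (magnitude 0x80000000 with the sign bit set).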
let kip10_tests = vec![ TestCase::<Kip10I64> { serialized: hex::decode("0000008000").expect("failed parsing hex"), - result: Ok(Kip10I64(2147483648)), + result: Ok(Kip10I64::from(2147483648i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("0000008080").expect("failed parsing hex"), - result: Ok(Kip10I64(-2147483648)), + result: Ok(Kip10I64::from(-2147483648i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("0000009000").expect("failed parsing hex"), - result: Ok(Kip10I64(2415919104)), + result: Ok(Kip10I64::from(2415919104i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("0000009080").expect("failed parsing hex"), - result: Ok(Kip10I64(-2415919104)), + result: Ok(Kip10I64::from(-2415919104i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffff00").expect("failed parsing hex"), - result: Ok(Kip10I64(4294967295)), + result: Ok(Kip10I64::from(4294967295i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffff80").expect("failed parsing hex"), - result: Ok(Kip10I64(-4294967295)), + result: Ok(Kip10I64::from(-4294967295i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("0000000001").expect("failed parsing hex"), - result: Ok(Kip10I64(4294967296)), + result: Ok(Kip10I64::from(4294967296i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("0000000081").expect("failed parsing hex"), - result: Ok(Kip10I64(-4294967296)), + result: Ok(Kip10I64::from(-4294967296i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffffffff00").expect("failed parsing hex"), - result: Ok(Kip10I64(281474976710655)), + result: Ok(Kip10I64::from(281474976710655i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffffffff80").expect("failed parsing hex"), - result: Ok(Kip10I64(-281474976710655)), + result: Ok(Kip10I64::from(-281474976710655i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffffffffff00").expect("failed parsing hex"), - result: Ok(Kip10I64(72057594037927935)), + result: Ok(Kip10I64::from(72057594037927935i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffffffffff80").expect("failed parsing hex"), - result: Ok(Kip10I64(-72057594037927935)), + result: Ok(Kip10I64::from(-72057594037927935i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffffffffff7f").expect("failed parsing hex"), - result: Ok(Kip10I64(9223372036854775807)), + result: Ok(Kip10I64::from(9223372036854775807i64)), }, TestCase::<Kip10I64> { serialized: hex::decode("ffffffffffffffff").expect("failed parsing hex"), - result: Ok(Kip10I64(-9223372036854775807)), + result: Ok(Kip10I64::from(-9223372036854775807i64)), }, // Minimally encoded values that are out of range for data that // is interpreted as script numbers with the minimal encoding diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index f36307a60a..a82be592f6 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -1196,17 +1196,11 @@ mod bitcoind_tests { // Read the JSON contents of the file as an instance of `User`. let tests: Vec<JsonTestRow> = serde_json::from_reader(reader).expect("Failed Parsing {:?}"); - let mut had_errors = 0; - let total_tests = tests.len(); for row in tests { if let Err(error) = row.test_row(kip10_enabled) { - println!("Test: {:?} failed: {:?}", row.clone(), error); - had_errors += 1; + panic!("Test: {:?} failed for {}: {:?}", row.clone(), file_name, error); } } - if had_errors > 0 { - panic!("{}/{} json tests failed", had_errors, total_tests) - } } } } diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index c59bc27d91..5ee6fbaae1 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -219,6 +219,7 @@ fn push_number( /// This macro helps to avoid code duplication in numeric opcodes where the only difference /// between KIP10_ENABLED and disabled states is the numeric type used (Kip10I64 vs i64). /// The Kip10I64 deserializer supports 8-byte integers +// TODO: Remove this macro after KIP-10 activation. macro_rules! numeric_op { ($vm: expr, $pattern: pat, $count: expr, $block: expr) => { if $vm.kip10_enabled { diff --git a/crypto/txscript/src/script_builder.rs b/crypto/txscript/src/script_builder.rs index 466b8b4089..7a5b28ca5a 100644 --- a/crypto/txscript/src/script_builder.rs +++ b/crypto/txscript/src/script_builder.rs @@ -1,7 +1,7 @@ use std::iter::once; use crate::{ - data_stack::OpcodeData, + data_stack::{Kip10I64, OpcodeData}, opcodes::{codes::*, OP_1_NEGATE_VAL, OP_DATA_MAX_VAL, OP_DATA_MIN_VAL, OP_SMALL_INT_MAX_VAL}, MAX_SCRIPTS_SIZE, MAX_SCRIPT_ELEMENT_SIZE, }; @@ -232,7 +232,7 @@ impl ScriptBuilder { return Ok(self); } - let bytes: Vec<_> = OpcodeData::<i64>::serialize(&val)?; + let bytes: Vec<_> = OpcodeData::<Kip10I64>::serialize(&val.into())?; self.add_data(&bytes) } From 1d3b9a95911b99dbd1b45e8b23739eeb0c7c6ba8 Mon Sep 17 00:00:00 2001 From: witter-deland <87846830+witter-deland@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:36:21 +0700 Subject: [PATCH 25/31] feat: add signMessage noAuxRand option for kaspa wasm (#587) * feat: add signMessageWithoutRand method for kaspa wasm * enhance: sign message api * fix: unit test fail * chore: update noAuxRand of ISignMessage * chore: add sign message demo for noAuxRand --- cli/src/modules/message.rs | 4 +- wallet/core/src/message.rs | 54 +++++++++++++++++-- wallet/core/src/wasm/message.rs | 5 +- .../javascript/general/message-signing.js | 6 ++- 4 files changed, 60 insertions(+), 9 deletions(-) diff --git a/cli/src/modules/message.rs b/cli/src/modules/message.rs index dce7f36790..d38624dc2b 100644 --- a/cli/src/modules/message.rs +++ b/cli/src/modules/message.rs @@ -1,5 +1,6 @@ use kaspa_addresses::Version; use kaspa_bip32::secp256k1::XOnlyPublicKey; +use kaspa_wallet_core::message::SignMessageOptions; use kaspa_wallet_core::{ account::{BIP32_ACCOUNT_KIND, KEYPAIR_ACCOUNT_KIND}, message::{sign_message, verify_message, PersonalMessage}, @@ -87,8 +88,9 @@ impl Message { let pm = PersonalMessage(message); let privkey = self.get_address_private_key(&ctx, kaspa_address).await?; + let sign_options = SignMessageOptions { no_aux_rand: false };
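+ // The CLI keeps auxiliary randomness enabled (no_aux_rand: false), preserving the signing behavior from before this option existed.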
- let sig_result = sign_message(&pm, &privkey); + let sig_result = sign_message(&pm, &privkey, &sign_options); match sig_result { Ok(signature) => { diff --git a/wallet/core/src/message.rs b/wallet/core/src/message.rs index 01dc78676b..152bf28aad 100644 --- a/wallet/core/src/message.rs +++ b/wallet/core/src/message.rs @@ -15,13 +15,28 @@ impl AsRef<[u8]> for PersonalMessage<'_> { } } +#[derive(Clone)] +pub struct SignMessageOptions { + /// The auxiliary randomness exists only to mitigate specific kinds of power analysis + /// side-channel attacks. Providing it definitely improves security, but omitting it + /// should not be considered dangerous, as most legacy signature schemes don't provide + /// mitigations against such attacks. To read more about the relevant discussions that + /// arose in adding this randomness, please see: https://github.com/sipa/bips/issues/195 + pub no_aux_rand: bool, +} + /// Sign a message with the given private key -pub fn sign_message(msg: &PersonalMessage, privkey: &[u8; 32]) -> Result<Vec<u8>, Error> { +pub fn sign_message(msg: &PersonalMessage, privkey: &[u8; 32], options: &SignMessageOptions) -> Result<Vec<u8>, Error> { let hash = calc_personal_message_hash(msg); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice())?; let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, privkey)?; - let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); + + let sig: [u8; 64] = if options.no_aux_rand { + *secp256k1::SECP256K1.sign_schnorr_no_aux_rand(&msg, &schnorr_key).as_ref() + } else { + *schnorr_key.sign_schnorr(msg).as_ref() + }; Ok(sig.to_vec()) }
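+// Without auxiliary randomness, BIP-340 nonce derivation depends only on the key and message, so signing the same message twice yields an identical signature; see test_basic_sign_without_rand_twice_should_get_same_signature below.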
&privkey).expect("sign_message failed"), &pubkey).expect("verify_message failed"); + let sign_with_aux_rand = SignMessageOptions { no_aux_rand: false }; + let sign_with_no_aux_rand = SignMessageOptions { no_aux_rand: true }; + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_no_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); } #[test] diff --git a/wallet/core/src/wasm/message.rs b/wallet/core/src/wasm/message.rs index 25c7f399ad..372129280d 100644 --- a/wallet/core/src/wasm/message.rs +++ b/wallet/core/src/wasm/message.rs @@ -14,6 +14,7 @@ const TS_MESSAGE_TYPES: &'static str = r#" export interface ISignMessage { message: string; privateKey: PrivateKey | string; + noAuxRand?: boolean; } "#; @@ -30,10 +31,12 @@ pub fn js_sign_message(value: ISignMessage) -> Result { if let Some(object) = Object::try_from(&value) { let private_key = object.cast_into::("privateKey")?; let raw_msg = object.get_string("message")?; + let no_aux_rand = object.get_bool("noAuxRand").unwrap_or(false); let mut privkey_bytes = [0u8; 32]; privkey_bytes.copy_from_slice(&private_key.secret_bytes()); let pm = PersonalMessage(&raw_msg); - let sig_vec = sign_message(&pm, &privkey_bytes)?; + let sign_options = SignMessageOptions { no_aux_rand }; + let sig_vec = sign_message(&pm, &privkey_bytes, &sign_options)?; privkey_bytes.zeroize(); Ok(faster_hex::hex_string(sig_vec.as_slice()).into()) } else { diff --git a/wasm/examples/nodejs/javascript/general/message-signing.js b/wasm/examples/nodejs/javascript/general/message-signing.js index ed12afd451..832af2ca67 100644 --- a/wasm/examples/nodejs/javascript/general/message-signing.js +++ b/wasm/examples/nodejs/javascript/general/message-signing.js @@ -12,8 +12,8 @@ let message = 'Hello Kaspa!'; let privkey = 'b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d9045190cfef'; let pubkey = 'dff1d77f2a671c5f36183726db2341be58feae1da2deced843240f7b502ba659'; -function runDemo(message, privateKey, publicKey) { - let signature = signMessage({message, privateKey}); +function runDemo(message, privateKey, publicKey, noAuxRand) { + let signature = signMessage({message, privateKey, noAuxRand}); console.info(`Message: ${message} => Signature: ${signature}`); @@ -26,5 +26,7 @@ function runDemo(message, privateKey, publicKey) { // Using strings: runDemo(message, privkey, pubkey); +runDemo(message, privkey, pubkey, true); // Using Objects: runDemo(message, new PrivateKey(privkey), new PublicKey(pubkey)); +runDemo(message, new PrivateKey(privkey), new PublicKey(pubkey), true); From a0aeec30ec3dc6ea1abbfe83ca2bcb05107b9728 Mon Sep 17 00:00:00 2001 From: D-Stacks <78099568+D-Stacks@users.noreply.github.com> Date: Tue, 19 Nov 2024 09:54:36 +0100 Subject: [PATCH 26/31] Optimize window cache building for ibd (#576) * show changes. * optimize window caches for ibd. * do lints and checks etc.. * bench and compare. * clean-up * rework lock time check a bit. * // bool: todo!(), * fmt * address some review points. * address review comments. * update comments. * pass tests. * fix blue work assumption, update error message. * Update window.rs slight comment update. * simplify a bit more. * remove some unneeded things. rearrange access to cmpct gdd. * fix conflicts. * lints.. * address review points from m. sutton. * uncomplicate check_block_transactions_in_context * commit in lazy * fix lints.
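For reference, a minimal sketch of what the `no_aux_rand` / `noAuxRand` switch in the message-signing patch above selects between, assuming the same secp256k1 crate APIs the patch calls (global-context feature enabled; the 32-byte digest stands in for the output of `calc_personal_message_hash`):

    use secp256k1::{Keypair, Message, SECP256K1};

    // A sketch under the stated assumptions, not the production signing path.
    fn sign_digest(digest: &[u8; 32], privkey: &[u8; 32], no_aux_rand: bool) -> [u8; 64] {
        let msg = Message::from_digest_slice(digest).expect("digest is 32 bytes");
        let keypair = Keypair::from_seckey_slice(SECP256K1, privkey).expect("valid secret key");
        let sig: [u8; 64] = if no_aux_rand {
            // The BIP-340 nonce is derived from the key and message alone, so two
            // calls with the same inputs return byte-identical signatures.
            *SECP256K1.sign_schnorr_no_aux_rand(&msg, &keypair).as_ref()
        } else {
            // Fresh auxiliary randomness on every call: signatures differ across runs.
            *keypair.sign_schnorr(msg).as_ref()
        };
        sig
    }

That determinism is exactly what the new `test_basic_sign_without_rand_twice_should_get_same_signature` test asserts: two `no_aux_rand` signatures over the same message compare equal.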
* query compact data as much as possible. * Use DerefMut to unify push_mergeset logic for all cases (addresses @Tiram's review) * comment on cache_sink_windows * add comment to new_sink != prev_sink * return out of push_mergeset, if we cannot push any more. * remove unused diff cache and do non-daa as option. --- consensus/core/src/config/constants.rs | 2 +- consensus/core/src/errors/block.rs | 4 +- consensus/src/consensus/mod.rs | 16 +- consensus/src/model/stores/ghostdag.rs | 1 + .../body_validation_in_context.rs | 29 ++- .../src/pipeline/body_processor/processor.rs | 61 +++--- .../header_processor/pre_pow_validation.rs | 2 +- .../pipeline/virtual_processor/processor.rs | 46 ++++- consensus/src/processes/window.rs | 176 +++++++++++------- 9 files changed, 220 insertions(+), 117 deletions(-) diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index c4635083b7..146e30a17c 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -121,7 +121,7 @@ pub mod perf { const BASELINE_HEADER_DATA_CACHE_SIZE: usize = 10_000; const BASELINE_BLOCK_DATA_CACHE_SIZE: usize = 200; - const BASELINE_BLOCK_WINDOW_CACHE_SIZE: usize = 2000; + const BASELINE_BLOCK_WINDOW_CACHE_SIZE: usize = 2_000; const BASELINE_UTXOSET_CACHE_SIZE: usize = 10_000; #[derive(Clone, Debug)] diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index f5c235476a..132c6619f7 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -64,8 +64,8 @@ pub enum RuleError { #[error("expected header blue work {0} but got {1}")] UnexpectedHeaderBlueWork(BlueWorkType, BlueWorkType), - #[error("block difficulty of {0} is not the expected value of {1}")] - UnexpectedDifficulty(u32, u32), + #[error("block {0} difficulty of {1} is not the expected value of {2}")] + UnexpectedDifficulty(Hash, u32, u32), #[error("block timestamp of {0} is not after expected {1}")] TimeTooOld(u64, u64), diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index b3edd55ca4..eca78ee2a4 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -241,23 +241,13 @@ impl Consensus { body_receiver, virtual_sender, block_processors_pool, + params, db.clone(), - storage.statuses_store.clone(), - storage.ghostdag_store.clone(), - storage.headers_store.clone(), - storage.block_transactions_store.clone(), - storage.body_tips_store.clone(), - services.reachability_service.clone(), - services.coinbase_manager.clone(), - services.mass_calculator.clone(), - services.transaction_validator.clone(), - services.window_manager.clone(), - params.max_block_mass, - params.genesis.clone(), + &storage, + &services, pruning_lock.clone(), notification_root.clone(), counters.clone(), - params.storage_mass_activation, )); let virtual_processor = Arc::new(VirtualStateProcessor::new( diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index fd2600a1c4..4ed02e4cec 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -48,6 +48,7 @@ impl MemSizeEstimator for GhostdagData { impl MemSizeEstimator for CompactGhostdagData {} impl From<&GhostdagData> for CompactGhostdagData { + #[inline(always)] fn from(value: &GhostdagData) -> Self { Self { blue_score: value.blue_score, blue_work: value.blue_work, selected_parent: value.selected_parent } } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs 
b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index b03643df87..ec42f0f447 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -8,6 +8,7 @@ use kaspa_consensus_core::block::Block; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_utils::option::OptionExtensions; +use once_cell::unsync::Lazy; use std::sync::Arc; impl BlockBodyProcessor { @@ -18,13 +19,31 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; + // Note: This is somewhat expensive during ibd, as it incurs cache misses. + + // Use lazy evaluation to avoid unnecessary work, as most of the time we expect the txs not to have lock time. + let lazy_pmt_res = + Lazy::new(|| match self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap()) { + Ok((pmt, pmt_window)) => { + if !self.block_window_cache_for_past_median_time.contains_key(&block.hash()) { + self.block_window_cache_for_past_median_time.insert(block.hash(), pmt_window); + }; + Ok(pmt) + } + Err(e) => Err(e), + }); + for tx in block.transactions.iter() { - if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { - return Err(RuleError::TxInContextFailed(tx.id(), e)); - } + // Quick check to avoid the expensive Lazy eval during ibd (in most cases). + // TODO: refactor this and avoid classifying the tx lock outside of the transaction validator. + if tx.lock_time != 0 { + if let Err(e) = + self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, (*lazy_pmt_res).clone()?) 
+ { + return Err(RuleError::TxInContextFailed(tx.id(), e)); + }; + }; } - Ok(()) } diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 6885c78b5e..ebb11a2003 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -1,10 +1,14 @@ use crate::{ - consensus::services::DbWindowManager, + consensus::{ + services::{ConsensusServices, DbWindowManager}, + storage::ConsensusStorage, + }, errors::{BlockProcessResult, RuleError}, model::{ services::reachability::MTReachabilityService, stores::{ block_transactions::DbBlockTransactionsStore, + block_window_cache::BlockWindowCacheStore, ghostdag::DbGhostdagStore, headers::DbHeadersStore, reachability::DbReachabilityStore, @@ -23,7 +27,10 @@ use crossbeam_channel::{Receiver, Sender}; use kaspa_consensus_core::{ block::Block, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - config::{genesis::GenesisBlock, params::ForkActivation}, + config::{ + genesis::GenesisBlock, + params::{ForkActivation, Params}, + }, mass::MassCalculator, tx::Transaction, }; @@ -60,6 +67,7 @@ pub struct BlockBodyProcessor { pub(super) headers_store: Arc, pub(super) block_transactions_store: Arc, pub(super) body_tips_store: Arc>, + pub(super) block_window_cache_for_past_median_time: Arc, // Managers and services pub(super) reachability_service: MTReachabilityService, @@ -91,47 +99,42 @@ impl BlockBodyProcessor { sender: Sender, thread_pool: Arc, + params: &Params, db: Arc, - statuses_store: Arc>, - ghostdag_store: Arc, - headers_store: Arc, - block_transactions_store: Arc, - body_tips_store: Arc>, - - reachability_service: MTReachabilityService, - coinbase_manager: CoinbaseManager, - mass_calculator: MassCalculator, - transaction_validator: TransactionValidator, - window_manager: DbWindowManager, - max_block_mass: u64, - genesis: GenesisBlock, + storage: &Arc, + services: &Arc, + pruning_lock: SessionLock, notification_root: Arc, counters: Arc, - storage_mass_activation: ForkActivation, ) -> Self { Self { receiver, sender, thread_pool, db, - statuses_store, - reachability_service, - ghostdag_store, - headers_store, - block_transactions_store, - body_tips_store, - coinbase_manager, - mass_calculator, - transaction_validator, - window_manager, - max_block_mass, - genesis, + + max_block_mass: params.max_block_mass, + genesis: params.genesis.clone(), + + statuses_store: storage.statuses_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), + headers_store: storage.headers_store.clone(), + block_transactions_store: storage.block_transactions_store.clone(), + body_tips_store: storage.body_tips_store.clone(), + block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), + + reachability_service: services.reachability_service.clone(), + coinbase_manager: services.coinbase_manager.clone(), + mass_calculator: services.mass_calculator.clone(), + transaction_validator: services.transaction_validator.clone(), + window_manager: services.window_manager.clone(), + pruning_lock, task_manager: BlockTaskDependencyManager::new(), notification_root, counters, - storage_mass_activation, + storage_mass_activation: params.storage_mass_activation, } } diff --git a/consensus/src/pipeline/header_processor/pre_pow_validation.rs b/consensus/src/pipeline/header_processor/pre_pow_validation.rs index a4dfb8b1e7..7764e1c150 100644 --- a/consensus/src/pipeline/header_processor/pre_pow_validation.rs +++ 
b/consensus/src/pipeline/header_processor/pre_pow_validation.rs @@ -35,7 +35,7 @@ impl HeaderProcessor { ctx.mergeset_non_daa = Some(daa_window.mergeset_non_daa); if header.bits != expected_bits { - return Err(RuleError::UnexpectedDifficulty(header.bits, expected_bits)); + return Err(RuleError::UnexpectedDifficulty(header.hash, header.bits, expected_bits)); } ctx.block_window_for_difficulty = Some(daa_window.window); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index c654fef430..1f0c4ff38b 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -16,6 +16,7 @@ use crate::{ stores::{ acceptance_data::{AcceptanceDataStoreReader, DbAcceptanceDataStore}, block_transactions::{BlockTransactionsStoreReader, DbBlockTransactionsStore}, + block_window_cache::BlockWindowCacheStore, daa::DbDaaStore, depth::{DbDepthStore, DepthStoreReader}, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, @@ -76,6 +77,7 @@ use kaspa_database::prelude::{StoreError, StoreResultEmptyTuple, StoreResultExte use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_notify::{events::EventType, notifier::Notify}; +use once_cell::unsync::Lazy; use super::errors::{PruningImportError, PruningImportResult}; use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; @@ -149,6 +151,10 @@ pub struct VirtualStateProcessor { pub(super) parents_manager: DbParentsManager, pub(super) depth_manager: DbBlockDepthManager, + // block window caches + pub(super) block_window_cache_for_difficulty: Arc, + pub(super) block_window_cache_for_past_median_time: Arc, + // Pruning lock pruning_lock: SessionLock, @@ -206,6 +212,9 @@ impl VirtualStateProcessor { pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), + block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), + block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), reachability_service: services.reachability_service.clone(), relations_service: services.relations_service.clone(), @@ -291,6 +300,10 @@ impl VirtualStateProcessor { let sink_multiset = self.utxo_multisets_store.get(new_sink).unwrap(); let chain_path = self.dag_traversal_manager.calculate_chain_path(prev_sink, new_sink, None); + let sink_ghostdag_data = Lazy::new(|| self.ghostdag_store.get_data(new_sink).unwrap()); + // Cache the DAA and Median time windows of the sink for future use, as well as prepare for virtual's window calculations + self.cache_sink_windows(new_sink, prev_sink, &sink_ghostdag_data); + let new_virtual_state = self .calculate_and_commit_virtual_state( virtual_read, @@ -302,12 +315,19 @@ impl VirtualStateProcessor { ) .expect("all possible rule errors are unexpected here"); + let compact_sink_ghostdag_data = if let Some(sink_ghostdag_data) = Lazy::get(&sink_ghostdag_data) { + // If we had to retrieve the full data, we convert it to compact + sink_ghostdag_data.to_compact() + } else { + // Else we query the compact data directly. + self.ghostdag_store.get_compact_data(new_sink).unwrap() + }; + // Update the pruning processor about the virtual state change - let sink_ghostdag_data = self.ghostdag_store.get_compact_data(new_sink).unwrap(); // Empty the channel before sending the new message. 
If pruning processor is busy, this step makes sure // the internal channel does not grow with no need (since we only care about the most recent message) let _consume = self.pruning_receiver.try_iter().count(); - self.pruning_sender.send(PruningProcessingMessage::Process { sink_ghostdag_data }).unwrap(); + self.pruning_sender.send(PruningProcessingMessage::Process { sink_ghostdag_data: compact_sink_ghostdag_data }).unwrap(); // Emit notifications let accumulated_diff = Arc::new(accumulated_diff); @@ -319,7 +339,7 @@ impl VirtualStateProcessor { .notify(Notification::UtxosChanged(UtxosChangedNotification::new(accumulated_diff, virtual_parents))) .expect("expecting an open unbounded channel"); self.notification_root - .notify(Notification::SinkBlueScoreChanged(SinkBlueScoreChangedNotification::new(sink_ghostdag_data.blue_score))) + .notify(Notification::SinkBlueScoreChanged(SinkBlueScoreChangedNotification::new(compact_sink_ghostdag_data.blue_score))) .expect("expecting an open unbounded channel"); self.notification_root .notify(Notification::VirtualDaaScoreChanged(VirtualDaaScoreChangedNotification::new(new_virtual_state.daa_score))) @@ -540,6 +560,26 @@ impl VirtualStateProcessor { drop(selected_chain_write); } + /// Caches the DAA and Median time windows of the sink block (if needed). Following, virtual's window calculations will + /// naturally hit the cache finding the sink's windows and building upon them. + fn cache_sink_windows(&self, new_sink: Hash, prev_sink: Hash, sink_ghostdag_data: &impl Deref>) { + // We expect that the `new_sink` is cached (or some close-enough ancestor thereof) if it is equal to the `prev_sink`, + // Hence we short-circuit the check of the keys in such cases, thereby reducing the access of the read-lock + if new_sink != prev_sink { + // this is only important for ibd performance, as we incur expensive cache misses otherwise. + // this occurs because we cannot rely on header processing to pre-cache in this scenario. + if !self.block_window_cache_for_difficulty.contains_key(&new_sink) { + self.block_window_cache_for_difficulty + .insert(new_sink, self.window_manager.block_daa_window(sink_ghostdag_data.deref()).unwrap().window); + }; + + if !self.block_window_cache_for_past_median_time.contains_key(&new_sink) { + self.block_window_cache_for_past_median_time + .insert(new_sink, self.window_manager.calc_past_median_time(sink_ghostdag_data.deref()).unwrap().1); + }; + } + } + /// Returns the max number of tips to consider as virtual parents in a single virtual resolve operation. 
/// /// Guaranteed to be `>= self.max_block_parents` diff --git a/consensus/src/processes/window.rs b/consensus/src/processes/window.rs index ca0f71cf20..ab09b1e7cb 100644 --- a/consensus/src/processes/window.rs +++ b/consensus/src/processes/window.rs @@ -17,7 +17,12 @@ use kaspa_hashes::Hash; use kaspa_math::Uint256; use kaspa_utils::refs::Refs; use once_cell::unsync::Lazy; -use std::{cmp::Reverse, iter::once, ops::Deref, sync::Arc}; +use std::{ + cmp::Reverse, + iter::once, + ops::{Deref, DerefMut}, + sync::Arc, +}; use super::{ difficulty::{FullDifficultyManager, SampledDifficultyManager}, @@ -332,52 +337,36 @@ impl None, }; - if let Some(cache) = cache { - if let Some(selected_parent_binary_heap) = cache.get(&ghostdag_data.selected_parent) { - // Only use the cached window if it originates from here - if let WindowOrigin::Sampled = selected_parent_binary_heap.origin() { - let selected_parent_blue_work = self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(); - - let mut heap = - Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_binary_heap).clone())); - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { - match block { - SampledBlock::Sampled(block) => { - heap.try_push(block.hash, block.blue_work); - } - SampledBlock::NonDaa(hash) => { - mergeset_non_daa_inserter(hash); - } - } - } - - return if let Ok(heap) = Lazy::into_value(heap) { - Ok(Arc::new(heap.binary_heap)) - } else { - Ok(selected_parent_binary_heap.clone()) - }; - } - } + let selected_parent_blue_work = self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(); + + // Try to initialize the window from the cache directly + if let Some(res) = self.try_init_from_cache( + window_size, + sample_rate, + cache, + ghostdag_data, + selected_parent_blue_work, + Some(&mut mergeset_non_daa_inserter), + ) { + return Ok(res); } + // else we populate the window with the passed ghostdag_data. let mut window_heap = BoundedSizeBlockHeap::new(WindowOrigin::Sampled, window_size); - let parent_ghostdag = self.ghostdag_store.get_data(ghostdag_data.selected_parent).unwrap(); - - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, parent_ghostdag.blue_work) { - match block { - SampledBlock::Sampled(block) => { - window_heap.try_push(block.hash, block.blue_work); - } - SampledBlock::NonDaa(hash) => { - mergeset_non_daa_inserter(hash); - } - } - } + self.push_mergeset( + &mut &mut window_heap, + sample_rate, + ghostdag_data, + selected_parent_blue_work, + Some(&mut mergeset_non_daa_inserter), + ); + let mut current_ghostdag = self.ghostdag_store.get_data(ghostdag_data.selected_parent).unwrap(); - let mut current_ghostdag = parent_ghostdag; + // Note: no need to check for cache here, as we already tried to initialize from the passed ghostdag's selected parent cache in `self.try_init_from_cache` - // Walk down the chain until we cross the window boundaries + // Walk down the chain until we cross the window boundaries. loop { + // check if we may exit early. if current_ghostdag.selected_parent.is_origin() { // Reaching origin means there's no more data, so we expect the window to already be full, otherwise we err. 
// This error can happen only during an IBD from pruning proof when processing the first headers in the pruning point's @@ -387,50 +376,101 @@ impl); + + // see if we can inherit and merge with the selected parent cache + if self.try_merge_with_selected_parent_cache(&mut window_heap, cache, &current_ghostdag.selected_parent) { + // if successful, we may break out of the loop, with the window already filled. + break; + }; + + // update the current ghostdag to the parent ghostdag, and continue the loop. + current_ghostdag = parent_ghostdag; } Ok(Arc::new(window_heap.binary_heap)) } - fn try_push_mergeset( + /// Push the mergeset samples into the bounded heap. + /// Note: receives the heap argument as a DerefMut so that Lazy can be passed and be evaluated *only if an actual push is needed* + fn push_mergeset( &self, - heap: &mut BoundedSizeBlockHeap, + heap: &mut impl DerefMut, sample_rate: u64, ghostdag_data: &GhostdagData, selected_parent_blue_work: BlueWorkType, - ) -> bool { - // If the window is full and the selected parent is less than the minimum then we break - // because this means that there cannot be any more blocks in the past with higher blue work - if !heap.can_push(ghostdag_data.selected_parent, selected_parent_blue_work) { - return true; - } - - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { - match block { - SampledBlock::Sampled(block) => { + mergeset_non_daa_inserter: Option, + ) { + if let Some(mut mergeset_non_daa_inserter) = mergeset_non_daa_inserter { + // If we have a non-daa inserter, we must iterate over the whole mergeset and process both the sampled and non-daa blocks. + for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { + match block { + SampledBlock::Sampled(block) => { + heap.try_push(block.hash, block.blue_work); + } + SampledBlock::NonDaa(hash) => mergeset_non_daa_inserter(hash), + }; + } + } else { + // If we don't have a non-daa inserter, we can iterate over the sampled mergeset and return early if we can't push anymore.
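+ // (Early return is sound for the same reason the removed try_push_mergeset broke out early: once a push fails, there cannot be any more blocks in the past with higher blue work, so nothing later in the iteration can enter the bounded window.)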
+ for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { + if let SampledBlock::Sampled(block) = block { + if !heap.try_push(block.hash, block.blue_work) { - break; + return; } } - SampledBlock::NonDaa(_) => {} } } - false + } + + fn try_init_from_cache( + &self, + window_size: usize, + sample_rate: u64, + cache: Option<&Arc>, + ghostdag_data: &GhostdagData, + selected_parent_blue_work: BlueWorkType, + mergeset_non_daa_inserter: Option, + ) -> Option> { + cache.and_then(|cache| { + cache.get(&ghostdag_data.selected_parent).map(|selected_parent_window| { + let mut heap = Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_window).clone())); + // We pass a Lazy heap as an optimization to avoid cloning the selected parent heap in cases where the mergeset contains no samples + self.push_mergeset(&mut heap, sample_rate, ghostdag_data, selected_parent_blue_work, mergeset_non_daa_inserter); + if let Ok(heap) = Lazy::into_value(heap) { + Arc::new(heap.binary_heap) + } else { + selected_parent_window.clone() + } + }) + }) + } + + fn try_merge_with_selected_parent_cache( + &self, + heap: &mut BoundedSizeBlockHeap, + cache: Option<&Arc>, + selected_parent: &Hash, + ) -> bool { + cache + .and_then(|cache| { + cache.get(selected_parent).map(|selected_parent_window| { + heap.merge_ancestor_heap(&mut (*selected_parent_window).clone()); + }) + }) + .is_some() } fn sampled_mergeset_iterator<'a>( @@ -686,4 +726,14 @@ impl BoundedSizeBlockHeap { self.binary_heap.push(r_sortable_block); true } + + // Merges the ancestor heap into the current heap, trimming any overflow beyond the size bound. + fn merge_ancestor_heap(&mut self, ancestor_heap: &mut BlockWindowHeap) { + self.binary_heap.blocks.append(&mut ancestor_heap.blocks); + // Below we saturate for cases where the ancestor may be close to the origin or genesis. + // Note: the loop is a no-op when the saturating sub yields 0, i.e. when the combined size of the two heaps is less than or equal to the size bound. + for _ in 0..self.binary_heap.len().saturating_sub(self.size_bound) { + self.binary_heap.blocks.pop(); + } + } } From 64eeb89a1a43aa61c0e7687ec70f78dbd340e59c Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Sun, 24 Nov 2024 16:23:38 +0200 Subject: [PATCH 27/31] Enable payloads for non coinbase transactions (#591) * Enable payloads for non coinbase transactions * Add payload hash to sighash * test reflects enabling payload * Enhance benchmarking: add payload size variations Refactored `mock_tx` to `mock_tx_with_payload` to support custom payload sizes. Introduced new benchmark function `benchmark_check_scripts_with_payload` to test performance with varying payload sizes. Commented out the old benchmark function to focus on payload-based tests. * Enhance script checking benchmarks Added benchmarks to evaluate script checking performance with varying payload sizes and input counts. This helps in understanding the impact of transaction payload size on validation and the relationship between input count and payload processing overhead. * Add new test case for transaction hashing and refactor code This commit introduces a new test case to verify that transaction IDs and hashes change with payload modifications. Additionally, code readability and consistency are improved by refactoring multi-line expressions into single lines where appropriate. 
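The property that the new hashing test pins down can be shown in isolation: the payload is committed with a length-prefixed byte write, so any change to it propagates to the digest. A hedged sketch reusing names from the sighash code in this patch (`write_var_bytes` comes from consensus-core's `HasherExtensions` trait; the exact import path here is an assumption):

    use kaspa_consensus_core::hashing::HasherExtensions; // assumed location of write_var_bytes
    use kaspa_hashes::{Hash, Hasher, TransactionSigningHash};

    // Illustrative helper, not part of the patch: hash a payload the way the
    // new sighash code does, via a length-prefixed byte write.
    fn payload_digest(payload: &[u8]) -> Hash {
        let mut hasher = TransactionSigningHash::new();
        hasher.write_var_bytes(payload);
        hasher.finalize()
    }

    // payload_digest(b"abc") != payload_digest(b"abd"), which is why both the
    // transaction id and the transaction hash move when the payload changes.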
* Add payload activation test for transactions This commit introduces a new integration test to validate the enforcement of payload activation rules at a specified DAA score. The test ensures that transactions with large payloads are rejected before activation and accepted afterward, maintaining consensus integrity. * style: fmt * test: add test that checks that payload change reflects sighash * rename test * Don't ever skip utxo_free_tx_validation * lints --------- Co-authored-by: max143672 --- consensus/benches/check_scripts.rs | 56 +++++-- consensus/core/src/config/params.rs | 13 ++ consensus/core/src/hashing/sighash.rs | 61 ++++++-- consensus/core/src/hashing/tx.rs | 7 + consensus/src/consensus/services.rs | 1 + .../body_validation_in_context.rs | 29 ++-- .../processes/transaction_validator/mod.rs | 4 + .../tx_validation_in_isolation.rs | 11 +- .../tx_validation_not_utxo_related.rs | 12 ++ simpa/Cargo.toml | 5 + simpa/src/main.rs | 4 + simpa/src/simulator/miner.rs | 8 +- simpa/src/simulator/network.rs | 2 + .../src/consensus_integration_tests.rs | 142 +++++++++++++++++- 14 files changed, 297 insertions(+), 58 deletions(-) diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs index a451eec650..5d13c43d8e 100644 --- a/consensus/benches/check_scripts.rs +++ b/consensus/benches/check_scripts.rs @@ -13,8 +13,10 @@ use kaspa_utils::iter::parallelism_in_power_steps; use rand::{thread_rng, Rng}; use secp256k1::Keypair; -// You may need to add more detailed mocks depending on your actual code. -fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec) { +fn mock_tx_with_payload(inputs_count: usize, non_uniq_signatures: usize, payload_size: usize) -> (Transaction, Vec) { + let mut payload = vec![0u8; payload_size]; + thread_rng().fill(&mut payload[..]); + let reused_values = SigHashReusedValuesUnsync::new(); let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); let mut tx = Transaction::new( @@ -24,10 +26,11 @@ fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec 0, SubnetworkId::from_bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), 0, - vec![], + payload, ); let mut utxos = vec![]; let mut kps = vec![]; + for _ in 0..inputs_count - non_uniq_signatures { let kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 }); @@ -40,6 +43,7 @@ fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec }); kps.push(kp); } + for _ in 0..non_uniq_signatures { let kp = kps.last().unwrap(); tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 }); @@ -51,14 +55,15 @@ fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec is_coinbase: false, }); } + for (i, kp) in kps.iter().enumerate().take(inputs_count - non_uniq_signatures) { let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone()); let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); - // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); 
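// (The resulting signature_script is 66 bytes: 0x41, i.e. OP_DATA_65, followed by the 64-byte Schnorr signature and the one-byte sighash type.)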
} + let length = tx.inputs.len(); for i in (inputs_count - non_uniq_signatures)..length { let kp = kps.last().unwrap(); @@ -66,16 +71,16 @@ fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); - // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); } + (tx, utxos) } fn benchmark_check_scripts(c: &mut Criterion) { for inputs_count in [100, 50, 25, 10, 5, 2] { for non_uniq_signatures in [0, inputs_count / 2] { - let (tx, utxos) = mock_tx(inputs_count, non_uniq_signatures); + let (tx, utxos) = mock_tx_with_payload(inputs_count, non_uniq_signatures, 0); let mut group = c.benchmark_group(format!("inputs: {inputs_count}, non uniq: {non_uniq_signatures}")); group.sampling_mode(SamplingMode::Flat); @@ -97,12 +102,10 @@ fn benchmark_check_scripts(c: &mut Criterion) { }) }); - // Iterate powers of two up to available parallelism for i in parallelism_in_power_steps() { if inputs_count >= i { group.bench_function(format!("rayon, custom thread pool, thread count {i}"), |b| { let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); - // Create a custom thread pool with the specified number of threads let pool = rayon::ThreadPoolBuilder::new().num_threads(i).build().unwrap(); let cache = Cache::new(inputs_count as u64); b.iter(|| { @@ -117,11 +120,44 @@ fn benchmark_check_scripts(c: &mut Criterion) { } } +/// Benchmarks script checking performance with different payload sizes and input counts. +/// +/// This benchmark evaluates the performance impact of transaction payload size +/// on script validation, testing multiple scenarios: +/// +/// * Payload sizes: 0KB, 16KB, 32KB, 64KB, 128KB +/// * Input counts: 1, 2, 10, 50 transactions +/// +/// The benchmark helps understand: +/// 1. How payload size affects validation performance +/// 2. The relationship between input count and payload processing overhead +fn benchmark_check_scripts_with_payload(c: &mut Criterion) { + let payload_sizes = [0, 16_384, 32_768, 65_536, 131_072]; // 0, 16KB, 32KB, 64KB, 128KB + let input_counts = [1, 2, 10, 50]; + let non_uniq_signatures = 0; + + for inputs_count in input_counts { + for &payload_size in &payload_sizes { + let (tx, utxos) = mock_tx_with_payload(inputs_count, non_uniq_signatures, payload_size); + let mut group = c.benchmark_group(format!("script_check/inputs_{}/payload_{}_kb", inputs_count, payload_size / 1024)); + group.sampling_mode(SamplingMode::Flat); + + group.bench_function("parallel_validation", |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.clear(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false).unwrap(); + }) + }); + } + } +} + criterion_group! { name = benches; - // This can be any expression that returns a `Criterion` object. 
config = Criterion::default().with_output_color(true).measurement_time(std::time::Duration::new(20, 0)); - targets = benchmark_check_scripts + targets = benchmark_check_scripts, benchmark_check_scripts_with_payload } criterion_main!(benches); diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index 8cab11c92d..e5da18c256 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -130,6 +130,9 @@ pub struct Params { pub skip_proof_of_work: bool, pub max_block_level: BlockLevel, pub pruning_proof_m: u64, + + /// Activation rules for when to enable using the payload field in transactions + pub payload_activation: ForkActivation, } fn unix_now() -> u64 { @@ -406,6 +409,8 @@ pub const MAINNET_PARAMS: Params = Params { skip_proof_of_work: false, max_block_level: 225, pruning_proof_m: 1000, + + payload_activation: ForkActivation::never(), }; pub const TESTNET_PARAMS: Params = Params { @@ -469,6 +474,8 @@ pub const TESTNET_PARAMS: Params = Params { skip_proof_of_work: false, max_block_level: 250, pruning_proof_m: 1000, + + payload_activation: ForkActivation::never(), }; pub const TESTNET11_PARAMS: Params = Params { @@ -530,6 +537,8 @@ pub const TESTNET11_PARAMS: Params = Params { skip_proof_of_work: false, max_block_level: 250, + + payload_activation: ForkActivation::never(), }; pub const SIMNET_PARAMS: Params = Params { @@ -584,6 +593,8 @@ pub const SIMNET_PARAMS: Params = Params { skip_proof_of_work: true, // For simnet only, PoW can be simulated by default max_block_level: 250, + + payload_activation: ForkActivation::never(), }; pub const DEVNET_PARAMS: Params = Params { @@ -641,4 +652,6 @@ pub const DEVNET_PARAMS: Params = Params { skip_proof_of_work: false, max_block_level: 250, pruning_proof_m: 1000, + + payload_activation: ForkActivation::never(), }; diff --git a/consensus/core/src/hashing/sighash.rs b/consensus/core/src/hashing/sighash.rs index e6c7ad4dd0..05645356dd 100644 --- a/consensus/core/src/hashing/sighash.rs +++ b/consensus/core/src/hashing/sighash.rs @@ -3,10 +3,7 @@ use kaspa_hashes::{Hash, Hasher, HasherBase, TransactionSigningHash, Transaction use std::cell::Cell; use std::sync::Arc; -use crate::{ - subnets::SUBNETWORK_ID_NATIVE, - tx::{ScriptPublicKey, Transaction, TransactionOutpoint, TransactionOutput, VerifiableTransaction}, -}; +use crate::tx::{ScriptPublicKey, Transaction, TransactionOutpoint, TransactionOutput, VerifiableTransaction}; use super::{sighash_type::SigHashType, HasherExtensions}; @@ -19,6 +16,7 @@ pub struct SigHashReusedValuesUnsync { sequences_hash: Cell>, sig_op_counts_hash: Cell>, outputs_hash: Cell>, + payload_hash: Cell>, } impl SigHashReusedValuesUnsync { @@ -33,6 +31,7 @@ pub struct SigHashReusedValuesSync { sequences_hash: ArcSwapOption, sig_op_counts_hash: ArcSwapOption, outputs_hash: ArcSwapOption, + payload_hash: ArcSwapOption, } impl SigHashReusedValuesSync { @@ -46,6 +45,7 @@ pub trait SigHashReusedValues { fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash; fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash; fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash; } impl SigHashReusedValues for Arc { @@ -64,6 +64,10 @@ impl SigHashReusedValues for Arc { fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { self.as_ref().outputs_hash(set) } + + fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().outputs_hash(set) + } } impl SigHashReusedValues for SigHashReusedValuesUnsync { @@ 
-98,6 +102,14 @@ impl SigHashReusedValues for SigHashReusedValuesUnsync { hash }) } + + fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.payload_hash.get().unwrap_or_else(|| { + let hash = set(); + self.payload_hash.set(Some(hash)); + hash + }) + } } impl SigHashReusedValues for SigHashReusedValuesSync { @@ -136,6 +148,15 @@ impl SigHashReusedValues for SigHashReusedValuesSync { self.outputs_hash.rcu(|_| Arc::new(hash)); hash } + + fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.payload_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.payload_hash.rcu(|_| Arc::new(hash)); + hash + } } pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { @@ -182,17 +203,17 @@ pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_value reused_values.sig_op_counts_hash(hash) } -pub fn payload_hash(tx: &Transaction) -> Hash { - if tx.subnetwork_id == SUBNETWORK_ID_NATIVE { - return ZERO_HASH; - } +pub fn payload_hash(tx: &Transaction, reused_values: &impl SigHashReusedValues) -> Hash { + let hash = || { + if tx.subnetwork_id.is_native() && tx.payload.is_empty() { + return ZERO_HASH; + } - // TODO: Right now this branch will never be executed, since payload is disabled - // for all non coinbase transactions. Once payload is enabled, the payload hash - // should be cached to make it cost O(1) instead of O(tx.inputs.len()). - let mut hasher = TransactionSigningHash::new(); - hasher.write_var_bytes(&tx.payload); - hasher.finalize() + let mut hasher = TransactionSigningHash::new(); + hasher.write_var_bytes(&tx.payload); + hasher.finalize() + }; + reused_values.payload_hash(hash) } pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues, input_index: usize) -> Hash { @@ -260,7 +281,7 @@ pub fn calc_schnorr_signature_hash( .write_u64(tx.lock_time) .update(&tx.subnetwork_id) .write_u64(tx.gas) - .update(payload_hash(tx)) + .update(payload_hash(tx, reused_values)) .write_u8(hash_type.to_u8()); hasher.finalize() } @@ -285,7 +306,7 @@ mod tests { use crate::{ hashing::sighash_type::{SIG_HASH_ALL, SIG_HASH_ANY_ONE_CAN_PAY, SIG_HASH_NONE, SIG_HASH_SINGLE}, - subnets::SubnetworkId, + subnets::{SubnetworkId, SUBNETWORK_ID_NATIVE}, tx::{PopulatedTransaction, Transaction, TransactionId, TransactionInput, UtxoEntry}, }; @@ -608,6 +629,14 @@ mod tests { action: ModifyAction::NoAction, expected_hash: "846689131fb08b77f83af1d3901076732ef09d3f8fdff945be89aa4300562e5f", // should change the hash }, + TestVector { + name: "native-all-0-modify-payload", + populated_tx: &native_populated_tx, + hash_type: SIG_HASH_ALL, + input_index: 0, + action: ModifyAction::Payload, + expected_hash: "72ea6c2871e0f44499f1c2b556f265d9424bfea67cca9cb343b4b040ead65525", // should change the hash + }, // subnetwork transaction TestVector { name: "subnetwork-all-0", diff --git a/consensus/core/src/hashing/tx.rs b/consensus/core/src/hashing/tx.rs index 019f2a8f5b..9216a1c16e 100644 --- a/consensus/core/src/hashing/tx.rs +++ b/consensus/core/src/hashing/tx.rs @@ -157,6 +157,13 @@ mod tests { expected_hash: "31da267d5c34f0740c77b8c9ebde0845a01179ec68074578227b804bac306361", }); + // Test #8, same as 7 but with a non-zero payload. 
The test checks id and hash are affected by payload change + tests.push(Test { + tx: Transaction::new(2, inputs.clone(), outputs.clone(), 54, subnets::SUBNETWORK_ID_REGISTRY, 3, vec![1, 2, 3]), + expected_id: "1f18b18ab004ff1b44dd915554b486d64d7ebc02c054e867cc44e3d746e80b3b", + expected_hash: "a2029ebd66d29d41aa7b0c40230c1bfa7fe8e026fb44b7815dda4e991b9a5fad", + }); + for (i, test) in tests.iter().enumerate() { assert_eq!(test.tx.id(), Hash::from_str(test.expected_id).unwrap(), "transaction id failed for test {}", i + 1); assert_eq!( diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 16247db18b..06abb4e0bb 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -147,6 +147,7 @@ impl ConsensusServices { mass_calculator.clone(), params.storage_mass_activation, params.kip10_activation, + params.payload_activation, ); let pruning_point_manager = PruningPointManager::new( diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index ec42f0f447..2b1bd99487 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -8,7 +8,6 @@ use kaspa_consensus_core::block::Block; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_utils::option::OptionExtensions; -use once_cell::unsync::Lazy; use std::sync::Arc; impl BlockBodyProcessor { @@ -21,27 +20,17 @@ impl BlockBodyProcessor { fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { // Note: This is somewhat expensive during ibd, as it incurs cache misses. - // Use lazy evaluation to avoid unnecessary work, as most of the time we expect the txs not to have lock time. - let lazy_pmt_res = - Lazy::new(|| match self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap()) { - Ok((pmt, pmt_window)) => { - if !self.block_window_cache_for_past_median_time.contains_key(&block.hash()) { - self.block_window_cache_for_past_median_time.insert(block.hash(), pmt_window); - }; - Ok(pmt) - } - Err(e) => Err(e), - }); + let pmt = { + let (pmt, pmt_window) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; + if !self.block_window_cache_for_past_median_time.contains_key(&block.hash()) { + self.block_window_cache_for_past_median_time.insert(block.hash(), pmt_window); + }; + pmt + }; for tx in block.transactions.iter() { - // Quick check to avoid the expensive Lazy eval during ibd (in most cases). - // TODO: refactor this and avoid classifying the tx lock outside of the transaction validator. - if tx.lock_time != 0 { - if let Err(e) = - self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, (*lazy_pmt_res).clone()?) 
- { - return Err(RuleError::TxInContextFailed(tx.id(), e)); - }; + if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { + return Err(RuleError::TxInContextFailed(tx.id(), e)); }; } Ok(()) diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 7d007a3350..3f091dfd76 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -30,6 +30,7 @@ pub struct TransactionValidator { storage_mass_activation: ForkActivation, /// KIP-10 hardfork DAA score kip10_activation: ForkActivation, + payload_activation: ForkActivation, } impl TransactionValidator { @@ -46,6 +47,7 @@ impl TransactionValidator { mass_calculator: MassCalculator, storage_mass_activation: ForkActivation, kip10_activation: ForkActivation, + payload_activation: ForkActivation, ) -> Self { Self { max_tx_inputs, @@ -59,6 +61,7 @@ impl TransactionValidator { mass_calculator, storage_mass_activation, kip10_activation, + payload_activation, } } @@ -84,6 +87,7 @@ impl TransactionValidator { mass_calculator: MassCalculator::new(0, 0, 0, 0), storage_mass_activation: ForkActivation::never(), kip10_activation: ForkActivation::never(), + payload_activation: ForkActivation::never(), } } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index 914624f940..a08b83d94e 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -16,7 +16,6 @@ impl TransactionValidator { check_transaction_output_value_ranges(tx)?; check_duplicate_transaction_inputs(tx)?; check_gas(tx)?; - check_transaction_payload(tx)?; check_transaction_subnetwork(tx)?; check_transaction_version(tx) } @@ -107,14 +106,6 @@ fn check_gas(tx: &Transaction) -> TxResult<()> { Ok(()) } -fn check_transaction_payload(tx: &Transaction) -> TxResult<()> { - // This should be revised if subnetworks are activated (along with other validations that weren't copied from kaspad) - if !tx.is_coinbase() && !tx.payload.is_empty() { - return Err(TxRuleError::NonCoinbaseTxHasPayload); - } - Ok(()) -} - fn check_transaction_version(tx: &Transaction) -> TxResult<()> { if tx.version != TX_VERSION { return Err(TxRuleError::UnknownTxVersion(tx.version)); @@ -304,7 +295,7 @@ mod tests { let mut tx = valid_tx.clone(); tx.payload = vec![0]; - assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::NonCoinbaseTxHasPayload)); + assert_match!(tv.validate_tx_in_isolation(&tx), Ok(())); let mut tx = valid_tx; tx.version = TX_VERSION + 1; diff --git a/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs b/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs index 4cfa72b464..3a854948ac 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs @@ -9,6 +9,7 @@ use super::{ impl TransactionValidator { pub fn utxo_free_tx_validation(&self, tx: &Transaction, ctx_daa_score: u64, ctx_block_time: u64) -> TxResult<()> { + self.check_transaction_payload(tx, ctx_daa_score)?; self.check_tx_is_finalized(tx, ctx_daa_score, ctx_block_time) } @@ -38,4 +39,15 @@ impl TransactionValidator { Ok(()) } + + fn check_transaction_payload(&self, tx: 
&Transaction, ctx_daa_score: u64) -> TxResult<()> { + if self.payload_activation.is_active(ctx_daa_score) { + Ok(()) + } else { + if !tx.is_coinbase() && !tx.payload.is_empty() { + return Err(TxRuleError::NonCoinbaseTxHasPayload); + } + Ok(()) + } + } } diff --git a/simpa/Cargo.toml b/simpa/Cargo.toml index 815edf6a64..bea3110e1e 100644 --- a/simpa/Cargo.toml +++ b/simpa/Cargo.toml @@ -40,3 +40,8 @@ tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } [features] heap = ["dhat", "kaspa-alloc/heap"] semaphore-trace = ["kaspa-utils/semaphore-trace"] + +[profile.heap] +inherits = "release" +debug = true +strip = false diff --git a/simpa/src/main.rs b/simpa/src/main.rs index a2365e1c9f..c35c0c640e 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -122,6 +122,8 @@ struct Args { rocksdb_files_limit: Option, #[arg(long)] rocksdb_mem_budget: Option, + #[arg(long, default_value_t = false)] + long_payload: bool, } #[cfg(feature = "heap")] @@ -191,6 +193,7 @@ fn main_impl(mut args: Args) { let mut params = if args.testnet11 { TESTNET11_PARAMS } else { DEVNET_PARAMS }; params.storage_mass_activation = ForkActivation::new(400); params.storage_mass_parameter = 10_000; + params.payload_activation = ForkActivation::always(); let mut builder = ConfigBuilder::new(params) .apply_args(|config| apply_args_to_consensus_params(&args, &mut config.params)) .apply_args(|config| apply_args_to_perf_params(&args, &mut config.perf)) @@ -245,6 +248,7 @@ fn main_impl(mut args: Args) { args.rocksdb_stats_period_sec, args.rocksdb_files_limit, args.rocksdb_mem_budget, + args.long_payload, ) .run(until); consensus.shutdown(handles); diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index a9a4a3423d..9cd985937f 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -74,6 +74,7 @@ pub struct Miner { target_txs_per_block: u64, target_blocks: Option, max_cached_outpoints: usize, + long_payload: bool, // Mass calculator mass_calculator: MassCalculator, @@ -90,6 +91,7 @@ impl Miner { params: &Params, target_txs_per_block: u64, target_blocks: Option, + long_payload: bool, ) -> Self { let (schnorr_public_key, _) = pk.x_only_public_key(); let script_pub_key_script = once(0x20).chain(schnorr_public_key.serialize()).chain(once(0xac)).collect_vec(); // TODO: Use script builder when available to create p2pk properly @@ -114,6 +116,7 @@ impl Miner { params.mass_per_sig_op, params.storage_mass_parameter, ), + long_payload, } } @@ -143,7 +146,10 @@ impl Miner { .iter() .filter_map(|&outpoint| { let entry = self.get_spendable_entry(virtual_utxo_view, outpoint, virtual_state.daa_score)?; - let unsigned_tx = self.create_unsigned_tx(outpoint, entry.amount, multiple_outputs); + let mut unsigned_tx = self.create_unsigned_tx(outpoint, entry.amount, multiple_outputs); + if self.long_payload { + unsigned_tx.payload = vec![0; 90_000]; + } Some(MutableTransaction::with_entries(unsigned_tx, vec![entry])) }) .take(self.target_txs_per_block as usize) diff --git a/simpa/src/simulator/network.rs b/simpa/src/simulator/network.rs index 63e5a3b6cc..79ac6fad75 100644 --- a/simpa/src/simulator/network.rs +++ b/simpa/src/simulator/network.rs @@ -50,6 +50,7 @@ impl KaspaNetworkSimulator { rocksdb_stats_period_sec: Option, rocksdb_files_limit: Option, rocksdb_mem_budget: Option, + long_payload: bool, ) -> &mut Self { let secp = secp256k1::Secp256k1::new(); let mut rng = rand::thread_rng(); @@ -98,6 +99,7 @@ impl KaspaNetworkSimulator { &self.config, target_txs_per_block, 
self.target_blocks, + long_payload, )); self.simulation.register(i, miner_process); self.consensuses.push((consensus, handles, lifetime)); diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 3db614dc41..58a6e2bb33 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -27,6 +27,7 @@ use kaspa_consensus_core::api::{BlockValidationFutures, ConsensusApi}; use kaspa_consensus_core::block::Block; use kaspa_consensus_core::blockhash::new_unique; use kaspa_consensus_core::blockstatus::BlockStatus; +use kaspa_consensus_core::coinbase::MinerData; use kaspa_consensus_core::constants::{BLOCK_VERSION, SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER}; use kaspa_consensus_core::errors::block::{BlockProcessResult, RuleError}; use kaspa_consensus_core::header::Header; @@ -47,7 +48,7 @@ use crate::common; use flate2::read::GzDecoder; use futures_util::future::try_join_all; use itertools::Itertools; -use kaspa_consensus_core::coinbase::MinerData; +use kaspa_consensus_core::errors::tx::TxRuleError; use kaspa_consensus_core::merkle::calc_hash_merkle_root; use kaspa_consensus_core::muhash::MuHashExtensions; use kaspa_core::core::Core; @@ -61,6 +62,7 @@ use kaspa_math::Uint256; use kaspa_muhash::MuHash; use kaspa_notify::subscription::context::SubscriptionContext; use kaspa_txscript::caches::TxScriptCacheCounters; +use kaspa_txscript::opcodes::codes::OpTrue; use kaspa_utxoindex::api::{UtxoIndexApi, UtxoIndexProxy}; use kaspa_utxoindex::UtxoIndex; use serde::{Deserialize, Serialize}; @@ -842,6 +844,7 @@ impl KaspadGoParams { skip_proof_of_work: self.SkipProofOfWork, max_block_level: self.MaxBlockLevel, pruning_proof_m: self.PruningProofM, + payload_activation: ForkActivation::never(), } } } @@ -1865,3 +1868,140 @@ async fn run_kip10_activation_test() { assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); } + +#[tokio::test] +async fn payload_test() { + let config = ConfigBuilder::new(DEVNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| { + p.coinbase_maturity = 0; + p.payload_activation = ForkActivation::always() + }) + .build(); + let consensus = TestConsensus::new(&config); + let wait_handles = consensus.init(); + + let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![OpTrue]), vec![]); + let b = consensus.build_utxo_valid_block_with_parents(1.into(), vec![config.genesis.hash], miner_data.clone(), vec![]); + consensus.validate_and_insert_block(b.to_immutable()).virtual_state_task.await.unwrap(); + let funding_block = consensus.build_utxo_valid_block_with_parents(2.into(), vec![1.into()], miner_data, vec![]); + let cb_id = { + let mut cb = funding_block.transactions[0].clone(); + cb.finalize(); + cb.id() + }; + consensus.validate_and_insert_block(funding_block.to_immutable()).virtual_state_task.await.unwrap(); + let tx = Transaction::new( + 0, + vec![TransactionInput::new(TransactionOutpoint { transaction_id: cb_id, index: 0 }, vec![], 0, 0)], + vec![TransactionOutput::new(1, ScriptPublicKey::default())], + 0, + SubnetworkId::default(), + 0, + vec![0; (config.params.max_block_mass / 2) as usize], + ); + consensus.add_utxo_valid_block_with_parents(3.into(), vec![2.into()], vec![tx]).await.unwrap(); + + consensus.shutdown(wait_handles); +} + +#[tokio::test] +async fn payload_activation_test() { + use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; + + 
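+ // Test outline: build the chain to one block below the activation score, show that a payload-carrying transaction is rejected, then cross the activation score and show that the same transaction is accepted.
+ // The rule under test distills to a single comparison (hypothetical sketch, assuming `ForkActivation::is_active(daa)` means `daa >= activation score`):
+ //     allowed = daa_score >= activation_score || tx.is_coinbase() || tx.payload.is_empty()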
// Set payload activation at DAA score 3 for this test + const PAYLOAD_ACTIVATION_DAA_SCORE: u64 = 3; + + init_allocator_with_default_settings(); + + // Create initial UTXO to fund our test transactions + let initial_utxo_collection = [( + TransactionOutpoint::new(1.into(), 0), + UtxoEntry { + amount: SOMPI_PER_KASPA, + script_public_key: ScriptPublicKey::from_vec(0, vec![OpTrue]), + block_daa_score: 0, + is_coinbase: false, + }, + )]; + + // Initialize consensus with payload activation point + let config = ConfigBuilder::new(DEVNET_PARAMS) + .skip_proof_of_work() + .apply_args(|cfg| { + let mut genesis_multiset = MuHash::new(); + initial_utxo_collection.iter().for_each(|(outpoint, utxo)| { + genesis_multiset.add_utxo(outpoint, utxo); + }); + cfg.params.genesis.utxo_commitment = genesis_multiset.finalize(); + let genesis_header: Header = (&cfg.params.genesis).into(); + cfg.params.genesis.hash = genesis_header.hash; + }) + .edit_consensus_params(|p| { + p.payload_activation = ForkActivation::new(PAYLOAD_ACTIVATION_DAA_SCORE); + }) + .build(); + + let consensus = TestConsensus::new(&config); + let mut genesis_multiset = MuHash::new(); + consensus.append_imported_pruning_point_utxos(&initial_utxo_collection, &mut genesis_multiset); + consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap(); + consensus.init(); + + // Build blockchain up to one block before activation + let mut index = 0; + for _ in 0..PAYLOAD_ACTIVATION_DAA_SCORE - 1 { + let parent = if index == 0 { config.genesis.hash } else { index.into() }; + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![parent], vec![]).await.unwrap(); + index += 1; + } + assert_eq!(consensus.get_virtual_daa_score(), index); + + // Create transaction with large payload + let large_payload = vec![0u8; (config.params.max_block_mass / 2) as usize]; + let mut tx_with_payload = Transaction::new( + 0, + vec![TransactionInput::new( + initial_utxo_collection[0].0, + vec![], // Empty signature script since we're using OpTrue + 0, + 0, + )], + vec![TransactionOutput::new(initial_utxo_collection[0].1.amount - 5000, ScriptPublicKey::from_vec(0, vec![OpTrue]))], + 0, + SUBNETWORK_ID_NATIVE, + 0, + large_payload, + ); + tx_with_payload.finalize(); + let tx_id = tx_with_payload.id(); + + // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it + { + let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); + + // First build block without transactions + let mut block = + consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); + + // Insert our test transaction and recalculate block hashes + block.transactions.push(tx_with_payload.clone()); + + block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false); + let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; + assert!(matches!(block_status, Err(RuleError::TxInContextFailed(tx, TxRuleError::NonCoinbaseTxHasPayload)) if tx == tx_id)); + assert_eq!(consensus.lkg_virtual_state.load().daa_score, PAYLOAD_ACTIVATION_DAA_SCORE - 1); + index += 1; + } + + // Add one more block to reach activation score + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); + index += 1; + + // Test 2: Verify the same transaction is accepted after activation + let status = + consensus.add_utxo_valid_block_with_parents((index + 1).into(), 
vec![index.into()], vec![tx_with_payload.clone()]).await; + + assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); + assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); +} From ea6b83e7b78d303a7103d9eefa0adec82f16b8b5 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Thu, 28 Nov 2024 13:11:53 +0200 Subject: [PATCH 28/31] Small fixes related to enabling payload (#605) --- consensus/core/src/hashing/sighash.rs | 30 ++++----------------------- simpa/Cargo.toml | 5 ----- 2 files changed, 4 insertions(+), 31 deletions(-) diff --git a/consensus/core/src/hashing/sighash.rs b/consensus/core/src/hashing/sighash.rs index 05645356dd..2c8006f75d 100644 --- a/consensus/core/src/hashing/sighash.rs +++ b/consensus/core/src/hashing/sighash.rs @@ -48,28 +48,6 @@ pub trait SigHashReusedValues { fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash; } -impl SigHashReusedValues for Arc { - fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { - self.as_ref().previous_outputs_hash(set) - } - - fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { - self.as_ref().sequences_hash(set) - } - - fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { - self.as_ref().sig_op_counts_hash(set) - } - - fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { - self.as_ref().outputs_hash(set) - } - - fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash { - self.as_ref().outputs_hash(set) - } -} - impl SigHashReusedValues for SigHashReusedValuesUnsync { fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { self.previous_outputs_hash.get().unwrap_or_else(|| { @@ -204,11 +182,11 @@ pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_value } pub fn payload_hash(tx: &Transaction, reused_values: &impl SigHashReusedValues) -> Hash { - let hash = || { - if tx.subnetwork_id.is_native() && tx.payload.is_empty() { - return ZERO_HASH; - } + if tx.subnetwork_id.is_native() && tx.payload.is_empty() { + return ZERO_HASH; + } + let hash = || { let mut hasher = TransactionSigningHash::new(); hasher.write_var_bytes(&tx.payload); hasher.finalize() diff --git a/simpa/Cargo.toml b/simpa/Cargo.toml index bea3110e1e..815edf6a64 100644 --- a/simpa/Cargo.toml +++ b/simpa/Cargo.toml @@ -40,8 +40,3 @@ tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } [features] heap = ["dhat", "kaspa-alloc/heap"] semaphore-trace = ["kaspa-utils/semaphore-trace"] - -[profile.heap] -inherits = "release" -debug = true -strip = false From 73159f78767669548ee4470ab2fbed172c855f64 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 28 Nov 2024 20:48:22 +0200 Subject: [PATCH 29/31] Fix new lints required by Rust 1.83 (#606) * cargo fix all new 1.83 lints except for static_mut_refs * use LazyLock for CONTEXT * allow static_mut_refs for wallet storage paths * bump MSRV to 1.82 and use the recent is_none_or --- Cargo.toml | 2 +- cli/src/modules/history.rs | 10 +------- consensus/client/src/header.rs | 2 +- consensus/client/src/input.rs | 2 +- consensus/client/src/output.rs | 2 +- consensus/client/src/transaction.rs | 2 +- consensus/client/src/utxo.rs | 4 ++-- consensus/core/src/config/constants.rs | 5 ++-- consensus/core/src/network.rs | 4 ++-- consensus/core/src/tx.rs | 6 ++--- consensus/core/src/tx/script_public_key.rs | 4 ++-- consensus/src/model/services/reachability.rs | 1 - consensus/src/model/stores/ghostdag.rs | 8 +++---- consensus/src/model/stores/relations.rs | 2 +- consensus/src/model/stores/utxo_diffs.rs | 1 - 
.../body_validation_in_context.rs | 3 +-- consensus/src/processes/coinbase.rs | 11 ++++----- consensus/src/processes/pruning.rs | 3 +-- consensus/src/processes/sync/mod.rs | 3 +-- consensus/src/test_helpers.rs | 7 ++---- core/src/task/runtime.rs | 5 ++-- crypto/addresses/src/lib.rs | 2 +- crypto/hashes/src/lib.rs | 2 +- crypto/muhash/src/lib.rs | 2 +- crypto/txscript/src/lib.rs | 2 +- database/src/access.rs | 6 +++-- .../src/mempool/model/frontier/search_tree.rs | 2 +- mining/src/model/topological_sort.rs | 4 ++-- protocol/flows/src/flowcontext/orphans.rs | 8 +++---- .../flows/src/flowcontext/transactions.rs | 3 +-- rothschild/src/main.rs | 2 +- rpc/core/src/model/header.rs | 1 - rpc/wrpc/wasm/src/resolver.rs | 2 +- utils/src/as_slice.rs | 6 ++--- utils/src/iter.rs | 4 ++-- utils/src/lib.rs | 1 - utils/src/option.rs | 13 ----------- utils/src/serde_bytes/de.rs | 2 +- wallet/bip32/src/mnemonic/bits.rs | 2 +- wallet/bip32/src/xpublic_key.rs | 2 +- wallet/core/src/storage/local/mod.rs | 15 +++++++++--- wallet/core/src/tx/generator/generator.rs | 1 - wallet/core/src/tx/payment.rs | 4 ++-- wallet/core/src/wallet/mod.rs | 4 ++-- wallet/core/src/wasm/cryptobox.rs | 4 ++-- wallet/core/src/wasm/utxo/context.rs | 2 +- wallet/core/src/wasm/utxo/processor.rs | 2 +- wallet/keys/src/derivation_path.rs | 2 +- wallet/keys/src/keypair.rs | 2 +- wallet/keys/src/privatekey.rs | 2 +- wallet/keys/src/publickey.rs | 2 +- wallet/keys/src/xprv.rs | 2 +- wallet/keys/src/xpub.rs | 2 +- wallet/pskt/src/bundle.rs | 23 ++++++++----------- wallet/pskt/src/wasm/pskt.rs | 2 +- 55 files changed, 94 insertions(+), 128 deletions(-) delete mode 100644 utils/src/option.rs diff --git a/Cargo.toml b/Cargo.toml index 7141101f9a..aa304d37fb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ members = [ ] [workspace.package] -rust-version = "1.81.0" +rust-version = "1.82.0" version = "0.15.3" authors = ["Kaspa developers"] license = "ISC" diff --git a/cli/src/modules/history.rs b/cli/src/modules/history.rs index 8fdf31f4db..f46527f1d6 100644 --- a/cli/src/modules/history.rs +++ b/cli/src/modules/history.rs @@ -86,15 +86,7 @@ impl History { } }; let length = ids.size_hint().0; - let skip = if let Some(last) = last { - if last > length { - 0 - } else { - length - last - } - } else { - 0 - }; + let skip = if let Some(last) = last { length.saturating_sub(last) } else { 0 }; let mut index = 0; let page = 25; diff --git a/consensus/client/src/header.rs b/consensus/client/src/header.rs index 6f04a73c43..7d2e25b393 100644 --- a/consensus/client/src/header.rs +++ b/consensus/client/src/header.rs @@ -266,7 +266,7 @@ impl Header { impl TryCastFromJs for Header { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/client/src/input.rs b/consensus/client/src/input.rs index a5018199d5..0c5e052f2a 100644 --- a/consensus/client/src/input.rs +++ b/consensus/client/src/input.rs @@ -200,7 +200,7 @@ impl AsRef for TransactionInput { impl TryCastFromJs for TransactionInput { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/client/src/output.rs b/consensus/client/src/output.rs index 17b4a58c80..01772dde32 100644 --- a/consensus/client/src/output.rs +++ b/consensus/client/src/output.rs @@ -139,7 +139,7 @@ impl From<&TransactionOutput> for 
cctx::TransactionOutput { impl TryCastFromJs for TransactionOutput { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/client/src/transaction.rs b/consensus/client/src/transaction.rs index 17cc381265..4026ac1ebc 100644 --- a/consensus/client/src/transaction.rs +++ b/consensus/client/src/transaction.rs @@ -280,7 +280,7 @@ impl Transaction { impl TryCastFromJs for Transaction { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/client/src/utxo.rs b/consensus/client/src/utxo.rs index bbfc1199d1..99a663fd05 100644 --- a/consensus/client/src/utxo.rs +++ b/consensus/client/src/utxo.rs @@ -282,7 +282,7 @@ impl TryIntoUtxoEntryReferences for JsValue { impl TryCastFromJs for UtxoEntry { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { @@ -405,7 +405,7 @@ impl TryFrom for UtxoEntries { impl TryCastFromJs for UtxoEntryReference { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index 146e30a17c..899773bbf0 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -36,7 +36,7 @@ pub mod consensus { /// Size of the **sampled** median time window (independent of BPS) pub const MEDIAN_TIME_SAMPLED_WINDOW_SIZE: u64 = - ((2 * NEW_TIMESTAMP_DEVIATION_TOLERANCE - 1) + PAST_MEDIAN_TIME_SAMPLE_INTERVAL - 1) / PAST_MEDIAN_TIME_SAMPLE_INTERVAL; + (2 * NEW_TIMESTAMP_DEVIATION_TOLERANCE - 1).div_ceil(PAST_MEDIAN_TIME_SAMPLE_INTERVAL); // // ~~~~~~~~~~~~~~~~~~~~~~~~~ Max difficulty target ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -71,8 +71,7 @@ pub mod consensus { pub const DIFFICULTY_WINDOW_SAMPLE_INTERVAL: u64 = 4; /// Size of the **sampled** difficulty window (independent of BPS) - pub const DIFFICULTY_SAMPLED_WINDOW_SIZE: u64 = - (NEW_DIFFICULTY_WINDOW_DURATION + DIFFICULTY_WINDOW_SAMPLE_INTERVAL - 1) / DIFFICULTY_WINDOW_SAMPLE_INTERVAL; + pub const DIFFICULTY_SAMPLED_WINDOW_SIZE: u64 = NEW_DIFFICULTY_WINDOW_DURATION.div_ceil(DIFFICULTY_WINDOW_SAMPLE_INTERVAL); // // ~~~~~~~~~~~~~~~~~~~ Finality & Pruning ~~~~~~~~~~~~~~~~~~~ diff --git a/consensus/core/src/network.rs b/consensus/core/src/network.rs index 18e52eacbf..2f81444b3c 100644 --- a/consensus/core/src/network.rs +++ b/consensus/core/src/network.rs @@ -344,7 +344,7 @@ impl Serialize for NetworkId { struct NetworkIdVisitor; -impl<'de> de::Visitor<'de> for NetworkIdVisitor { +impl de::Visitor<'_> for NetworkIdVisitor { type Value = NetworkId; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -413,7 +413,7 @@ impl TryFrom for NetworkId { impl TryCastFromJs for NetworkId { type Error = NetworkIdError; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index 9f02ade4b6..769d29452c 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -321,7 +321,7 @@ 
impl<'a, T: VerifiableTransaction> Iterator for PopulatedInputIterator<'a, T> { } } -impl<'a, T: VerifiableTransaction> ExactSizeIterator for PopulatedInputIterator<'a, T> {} +impl ExactSizeIterator for PopulatedInputIterator<'_, T> {} /// Represents a read-only referenced transaction along with fully populated UTXO entry data pub struct PopulatedTransaction<'a> { @@ -336,7 +336,7 @@ impl<'a> PopulatedTransaction<'a> { } } -impl<'a> VerifiableTransaction for PopulatedTransaction<'a> { +impl VerifiableTransaction for PopulatedTransaction<'_> { fn tx(&self) -> &Transaction { self.tx } @@ -368,7 +368,7 @@ impl<'a> ValidatedTransaction<'a> { } } -impl<'a> VerifiableTransaction for ValidatedTransaction<'a> { +impl VerifiableTransaction for ValidatedTransaction<'_> { fn tx(&self) -> &Transaction { self.tx } diff --git a/consensus/core/src/tx/script_public_key.rs b/consensus/core/src/tx/script_public_key.rs index dfed2ab5ce..b0a4756066 100644 --- a/consensus/core/src/tx/script_public_key.rs +++ b/consensus/core/src/tx/script_public_key.rs @@ -94,7 +94,7 @@ impl Serialize for ScriptPublicKey { } } -impl<'de: 'a, 'a> Deserialize<'de> for ScriptPublicKey { +impl<'de> Deserialize<'de> for ScriptPublicKey { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -374,7 +374,7 @@ impl BorshDeserialize for ScriptPublicKey { type CastError = workflow_wasm::error::Error; impl TryCastFromJs for ScriptPublicKey { type Error = workflow_wasm::error::Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/src/model/services/reachability.rs b/consensus/src/model/services/reachability.rs index 39f5ceba2d..1c6282f8e5 100644 --- a/consensus/src/model/services/reachability.rs +++ b/consensus/src/model/services/reachability.rs @@ -154,7 +154,6 @@ impl MTReachabilityService { /// a compromise where the lock is released every constant number of items. 
/// /// TODO: decide if these alternatives require overall system benchmarking - struct BackwardChainIterator { store: Arc>, current: Option, diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index 4ed02e4cec..f74fa125bd 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -116,7 +116,7 @@ impl GhostdagData { pub fn ascending_mergeset_without_selected_parent<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator + '_ { + ) -> impl Iterator + 'a { self.mergeset_blues .iter() .skip(1) // Skip the selected parent @@ -139,7 +139,7 @@ impl GhostdagData { pub fn descending_mergeset_without_selected_parent<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator + '_ { + ) -> impl Iterator + 'a { self.mergeset_blues .iter() .skip(1) // Skip the selected parent @@ -175,7 +175,7 @@ impl GhostdagData { pub fn consensus_ordered_mergeset<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator + '_ { + ) -> impl Iterator + 'a { once(self.selected_parent).chain(self.ascending_mergeset_without_selected_parent(store).map(|s| s.hash)) } @@ -183,7 +183,7 @@ impl GhostdagData { pub fn consensus_ordered_mergeset_without_selected_parent<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator + '_ { + ) -> impl Iterator + 'a { self.ascending_mergeset_without_selected_parent(store).map(|s| s.hash) } diff --git a/consensus/src/model/stores/relations.rs b/consensus/src/model/stores/relations.rs index 4734f099a3..2971a3a87d 100644 --- a/consensus/src/model/stores/relations.rs +++ b/consensus/src/model/stores/relations.rs @@ -145,7 +145,7 @@ pub struct StagingRelationsStore<'a> { children_deletions: BlockHashMap, } -impl<'a> ChildrenStore for StagingRelationsStore<'a> { +impl ChildrenStore for StagingRelationsStore<'_> { fn insert_child(&mut self, _writer: impl DbWriter, parent: Hash, child: Hash) -> Result<(), StoreError> { self.check_not_in_entry_deletions(parent)?; self.check_not_in_children_deletions(parent, child)?; // We expect deletion to be permanent diff --git a/consensus/src/model/stores/utxo_diffs.rs b/consensus/src/model/stores/utxo_diffs.rs index 079f08ecbc..20ddd9b107 100644 --- a/consensus/src/model/stores/utxo_diffs.rs +++ b/consensus/src/model/stores/utxo_diffs.rs @@ -14,7 +14,6 @@ use rocksdb::WriteBatch; /// blocks. However, once the diff is computed, it is permanent. This store has a relation to /// block status, such that if a block has status `StatusUTXOValid` then it is expected to have /// utxo diff data as well as utxo multiset data and acceptance data. 
- pub trait UtxoDiffsStoreReader { fn get(&self, hash: Hash) -> Result, StoreError>; } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 2b1bd99487..0eca78651c 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -7,7 +7,6 @@ use crate::{ use kaspa_consensus_core::block::Block; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; -use kaspa_utils::option::OptionExtensions; use std::sync::Arc; impl BlockBodyProcessor { @@ -45,7 +44,7 @@ impl BlockBodyProcessor { .copied() .filter(|parent| { let status_option = statuses_read_guard.get(*parent).unwrap_option(); - status_option.is_none_or_ex(|s| !s.has_block_body()) + status_option.is_none_or(|s| !s.has_block_body()) }) .collect(); if !missing.is_empty() { diff --git a/consensus/src/processes/coinbase.rs b/consensus/src/processes/coinbase.rs index f79bbed751..d67f922c81 100644 --- a/consensus/src/processes/coinbase.rs +++ b/consensus/src/processes/coinbase.rs @@ -72,7 +72,7 @@ impl CoinbaseManager { // Precomputed subsidy by month table for the actual block per second rate // Here values are rounded up so that we keep the same number of rewarding months as in the original 1 BPS table. // In a 10 BPS network, the induced increase in total rewards is 51 KAS (see tests::calc_high_bps_total_rewards_delta()) - let subsidy_by_month_table: SubsidyByMonthTable = core::array::from_fn(|i| (SUBSIDY_BY_MONTH_TABLE[i] + bps - 1) / bps); + let subsidy_by_month_table: SubsidyByMonthTable = core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps)); Self { coinbase_payload_script_public_key_max_len, max_coinbase_payload_len, @@ -288,10 +288,7 @@ mod tests { let total_rewards: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| x * SECONDS_PER_MONTH).sum::(); let testnet_11_bps = TESTNET11_PARAMS.bps(); let total_high_bps_rewards_rounded_up: u64 = pre_deflationary_rewards - + SUBSIDY_BY_MONTH_TABLE - .iter() - .map(|x| ((x + testnet_11_bps - 1) / testnet_11_bps * testnet_11_bps) * SECONDS_PER_MONTH) - .sum::(); + + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| (x.div_ceil(testnet_11_bps) * testnet_11_bps) * SECONDS_PER_MONTH).sum::(); let cbm = create_manager(&TESTNET11_PARAMS); let total_high_bps_rewards: u64 = @@ -316,7 +313,7 @@ mod tests { let cbm = create_manager(&network_id.into()); cbm.subsidy_by_month_table.iter().enumerate().for_each(|(i, x)| { assert_eq!( - (SUBSIDY_BY_MONTH_TABLE[i] + cbm.bps() - 1) / cbm.bps(), + SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps()), *x, "{}: locally computed and precomputed values must match", network_id @@ -376,7 +373,7 @@ mod tests { Test { name: "after 32 halvings", daa_score: params.deflationary_phase_daa_score + 32 * blocks_per_halving, - expected: ((DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)) + cbm.bps() - 1) / cbm.bps(), + expected: (DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)).div_ceil(cbm.bps()), }, Test { name: "just before subsidy depleted", diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs index 7c534af8ed..5916df74d7 100644 --- a/consensus/src/processes/pruning.rs +++ b/consensus/src/processes/pruning.rs @@ -13,7 +13,6 @@ use crate::model::{ }, }; use kaspa_hashes::Hash; -use kaspa_utils::option::OptionExtensions; use parking_lot::RwLock; #[derive(Clone)] @@ -213,7 +212,7 @@ impl< let mut expected_pps_queue = 
VecDeque::new(); for current in self.reachability_service.backward_chain_iterator(hst, pruning_info.pruning_point, false) { let current_header = self.headers_store.get_header(current).unwrap(); - if expected_pps_queue.back().is_none_or_ex(|&&h| h != current_header.pruning_point) { + if expected_pps_queue.back().is_none_or(|&h| h != current_header.pruning_point) { expected_pps_queue.push_back(current_header.pruning_point); } } diff --git a/consensus/src/processes/sync/mod.rs b/consensus/src/processes/sync/mod.rs index 3978913bae..839e48a9ef 100644 --- a/consensus/src/processes/sync/mod.rs +++ b/consensus/src/processes/sync/mod.rs @@ -5,7 +5,6 @@ use kaspa_consensus_core::errors::sync::{SyncManagerError, SyncManagerResult}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_math::uint::malachite_base::num::arithmetic::traits::CeilingLogBase2; -use kaspa_utils::option::OptionExtensions; use parking_lot::RwLock; use crate::model::{ @@ -191,7 +190,7 @@ impl< } } - if highest_with_body.is_none_or_ex(|&h| h == high) { + if highest_with_body.is_none_or(|h| h == high) { return Ok(vec![]); }; diff --git a/consensus/src/test_helpers.rs b/consensus/src/test_helpers.rs index c119c6d6d2..b3867f145e 100644 --- a/consensus/src/test_helpers.rs +++ b/consensus/src/test_helpers.rs @@ -19,7 +19,7 @@ pub fn block_from_precomputed_hash(hash: Hash, parents: Vec) -> Block { pub fn generate_random_utxos_from_script_public_key_pool( rng: &mut SmallRng, amount: usize, - script_public_key_pool: &Vec, + script_public_key_pool: &[ScriptPublicKey], ) -> UtxoCollection { let mut i = 0; let mut collection = UtxoCollection::with_capacity(amount); @@ -40,10 +40,7 @@ pub fn generate_random_outpoint(rng: &mut SmallRng) -> TransactionOutpoint { TransactionOutpoint::new(generate_random_hash(rng), rng.gen::()) } -pub fn generate_random_utxo_from_script_public_key_pool( - rng: &mut SmallRng, - script_public_key_pool: &Vec, -) -> UtxoEntry { +pub fn generate_random_utxo_from_script_public_key_pool(rng: &mut SmallRng, script_public_key_pool: &[ScriptPublicKey]) -> UtxoEntry { UtxoEntry::new( rng.gen_range(1..100_000), //we choose small amounts as to not overflow with large utxosets. 
script_public_key_pool.choose(rng).expect("expected_script_public key").clone(), diff --git a/core/src/task/runtime.rs b/core/src/task/runtime.rs index 13deaae6b8..1bc3e6952e 100644 --- a/core/src/task/runtime.rs +++ b/core/src/task/runtime.rs @@ -50,14 +50,13 @@ impl AsyncRuntime { } /// Launch a tokio Runtime and run the top-level async objects - pub fn worker(self: &Arc, core: Arc) { - return tokio::runtime::Builder::new_multi_thread() + tokio::runtime::Builder::new_multi_thread() .worker_threads(self.threads) .enable_all() .build() .expect("Failed building the Runtime") - .block_on(async { self.worker_impl(core).await }); + .block_on(async { self.worker_impl(core).await }) } pub async fn worker_impl(self: &Arc, core: Arc) { diff --git a/crypto/addresses/src/lib.rs b/crypto/addresses/src/lib.rs index 8e3ea385a8..48c0baf198 100644 --- a/crypto/addresses/src/lib.rs +++ b/crypto/addresses/src/lib.rs @@ -506,7 +506,7 @@ impl<'de> Deserialize<'de> for Address { impl TryCastFromJs for Address { type Error = AddressError; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/crypto/hashes/src/lib.rs b/crypto/hashes/src/lib.rs index d9ff47997c..da9019af29 100644 --- a/crypto/hashes/src/lib.rs +++ b/crypto/hashes/src/lib.rs @@ -187,7 +187,7 @@ impl Hash { type TryFromError = workflow_wasm::error::Error; impl TryCastFromJs for Hash { type Error = TryFromError; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/crypto/muhash/src/lib.rs b/crypto/muhash/src/lib.rs index 3fa7fc6e69..2ad0594663 100644 --- a/crypto/muhash/src/lib.rs +++ b/crypto/muhash/src/lib.rs @@ -146,7 +146,7 @@ pub struct MuHashElementBuilder<'a> { element_hasher: MuHashElementHash, } -impl<'a> HasherBase for MuHashElementBuilder<'a> { +impl HasherBase for MuHashElementBuilder<'_> { fn update>(&mut self, data: A) -> &mut Self { self.element_hasher.write(data); self diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index a82be592f6..637a10aff2 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -230,7 +230,7 @@ impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<' #[inline] pub fn is_executing(&self) -> bool { - return self.cond_stack.is_empty() || *self.cond_stack.last().expect("Checked not empty") == OpCond::True; + self.cond_stack.is_empty() || *self.cond_stack.last().expect("Checked not empty") == OpCond::True } fn execute_opcode(&mut self, opcode: DynOpcodeImplementation) -> Result<(), TxScriptError> { diff --git a/database/src/access.rs b/database/src/access.rs index ad82197dbb..fad8ee1300 100644 --- a/database/src/access.rs +++ b/database/src/access.rs @@ -22,6 +22,8 @@ where prefix: Vec, } +pub type KeyDataResult = Result<(Box<[u8]>, TData), Box>; + impl CachedDbAccess where TKey: Clone + std::hash::Hash + Eq + Send + Sync, @@ -65,7 +67,7 @@ where } } - pub fn iterator(&self) -> impl Iterator, TData), Box>> + '_ + pub fn iterator(&self) -> impl Iterator> + '_ where TKey: Clone + AsRef<[u8]>, TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned` has short lifetime @@ -173,7 +175,7 @@ where seek_from: Option, // iter whole range if None limit: usize, // amount to take. skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve). 
- ) -> impl Iterator, TData), Box>> + '_ + ) -> impl Iterator> + '_ where TKey: Clone + AsRef<[u8]>, TData: DeserializeOwned, diff --git a/mining/src/mempool/model/frontier/search_tree.rs b/mining/src/mempool/model/frontier/search_tree.rs index edf34c2710..136269a794 100644 --- a/mining/src/mempool/model/frontier/search_tree.rs +++ b/mining/src/mempool/model/frontier/search_tree.rs @@ -111,7 +111,7 @@ impl<'a> PrefixWeightVisitor<'a> { } } -impl<'a> DescendVisit for PrefixWeightVisitor<'a> { +impl DescendVisit for PrefixWeightVisitor<'_> { type Result = f64; fn visit_inner(&mut self, keys: &[FeerateKey], arguments: &[FeerateWeight]) -> DescendVisitResult { diff --git a/mining/src/model/topological_sort.rs b/mining/src/model/topological_sort.rs index aa88cce023..fb276ac004 100644 --- a/mining/src/model/topological_sort.rs +++ b/mining/src/model/topological_sort.rs @@ -166,8 +166,8 @@ impl<'a, T: AsRef> Iterator for TopologicalIter<'a, T> { } } -impl<'a, T: AsRef> FusedIterator for TopologicalIter<'a, T> {} -impl<'a, T: AsRef> ExactSizeIterator for TopologicalIter<'a, T> { +impl> FusedIterator for TopologicalIter<'_, T> {} +impl> ExactSizeIterator for TopologicalIter<'_, T> { fn len(&self) -> usize { self.transactions.len() } diff --git a/protocol/flows/src/flowcontext/orphans.rs b/protocol/flows/src/flowcontext/orphans.rs index f18649e558..41bf940a17 100644 --- a/protocol/flows/src/flowcontext/orphans.rs +++ b/protocol/flows/src/flowcontext/orphans.rs @@ -6,7 +6,6 @@ use kaspa_consensus_core::{ use kaspa_consensusmanager::{BlockProcessingBatch, ConsensusProxy}; use kaspa_core::debug; use kaspa_hashes::Hash; -use kaspa_utils::option::OptionExtensions; use rand::Rng; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -166,7 +165,7 @@ impl OrphanBlocksPool { } } else { let status = consensus.async_get_block_status(current).await; - if status.is_none_or_ex(|s| s.is_header_only()) { + if status.is_none_or(|s| s.is_header_only()) { // Block is not in the orphan pool nor does its body exist consensus-wise, so it is a root roots.push(current); } @@ -193,8 +192,7 @@ impl OrphanBlocksPool { if let Occupied(entry) = self.orphans.entry(orphan_hash) { let mut processable = true; for p in entry.get().block.header.direct_parents().iter().copied() { - if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or_ex(|s| s.is_header_only()) - { + if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or(|s| s.is_header_only()) { processable = false; break; } @@ -250,7 +248,7 @@ impl OrphanBlocksPool { let mut processable = true; for parent in block.block.header.direct_parents().iter().copied() { if self.orphans.contains_key(&parent) - || consensus.async_get_block_status(parent).await.is_none_or_ex(|status| status.is_header_only()) + || consensus.async_get_block_status(parent).await.is_none_or(|status| status.is_header_only()) { processable = false; break; diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index 110b378b70..5fe1bb5939 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -47,8 +47,7 @@ impl TransactionsSpread { // Keep the launching times aligned to exact intervals. Note that `delta=10.1` seconds will result in // adding 10 seconds to last scan time, while `delta=11` will result in adding 20 (assuming scanning // interval is 10 seconds). 
- self.last_scanning_time += - Duration::from_secs(((delta.as_secs() + SCANNING_TASK_INTERVAL - 1) / SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL); + self.last_scanning_time += Duration::from_secs(delta.as_secs().div_ceil(SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL); self.scanning_job_count += 1; self.scanning_task_running = true; diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index 35d08493bb..9baeaa04e7 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -254,7 +254,7 @@ async fn main() { (stats.utxos_amount / stats.num_utxos as u64), stats.num_utxos / stats.num_txs, stats.num_outs / stats.num_txs, - if utxos_len > pending_len { utxos_len - pending_len } else { 0 }, + utxos_len.saturating_sub(pending_len), ); stats.since = now; stats.num_txs = 0; diff --git a/rpc/core/src/model/header.rs b/rpc/core/src/model/header.rs index dddf767b7f..fda6b70e1e 100644 --- a/rpc/core/src/model/header.rs +++ b/rpc/core/src/model/header.rs @@ -8,7 +8,6 @@ use workflow_serializer::prelude::*; /// Used for mining APIs (get_block_template & submit_block) #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] - pub struct RpcRawHeader { pub version: u16, pub parents_by_level: Vec>, diff --git a/rpc/wrpc/wasm/src/resolver.rs b/rpc/wrpc/wasm/src/resolver.rs index 7abfdb6884..1753534372 100644 --- a/rpc/wrpc/wasm/src/resolver.rs +++ b/rpc/wrpc/wasm/src/resolver.rs @@ -198,7 +198,7 @@ impl TryFrom for NativeResolver { impl TryCastFromJs for Resolver { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result> + fn try_cast_from<'a, R>(value: &'a R) -> Result> where R: AsRef + 'a, { diff --git a/utils/src/as_slice.rs b/utils/src/as_slice.rs index fd73c39039..7fc0459c6a 100644 --- a/utils/src/as_slice.rs +++ b/utils/src/as_slice.rs @@ -16,7 +16,7 @@ pub trait AsMutSlice: AsSlice { fn as_mut_slice(&mut self) -> &mut [Self::Element]; } -impl<'a, S> AsSlice for &'a S +impl AsSlice for &S where S: ?Sized + AsSlice, { @@ -27,7 +27,7 @@ where } } -impl<'a, S> AsSlice for &'a mut S +impl AsSlice for &mut S where S: ?Sized + AsSlice, { @@ -38,7 +38,7 @@ where } } -impl<'a, S> AsMutSlice for &'a mut S +impl AsMutSlice for &mut S where S: ?Sized + AsMutSlice, { diff --git a/utils/src/iter.rs b/utils/src/iter.rs index 3c4c98c64a..3d38eff12a 100644 --- a/utils/src/iter.rs +++ b/utils/src/iter.rs @@ -25,7 +25,7 @@ impl<'a, I> ReusableIterFormat<'a, I> { } } -impl<'a, I> std::fmt::Display for ReusableIterFormat<'a, I> +impl std::fmt::Display for ReusableIterFormat<'_, I> where I: std::clone::Clone, I: Iterator, @@ -37,7 +37,7 @@ where } } -impl<'a, I> std::fmt::Debug for ReusableIterFormat<'a, I> +impl std::fmt::Debug for ReusableIterFormat<'_, I> where I: std::clone::Clone, I: Iterator, diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 3d1bb54384..c6cc077c4a 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -14,7 +14,6 @@ pub mod hex; pub mod iter; pub mod mem_size; pub mod networking; -pub mod option; pub mod refs; pub mod as_slice; diff --git a/utils/src/option.rs b/utils/src/option.rs deleted file mode 100644 index 3e619f46fa..0000000000 --- a/utils/src/option.rs +++ /dev/null @@ -1,13 +0,0 @@ -pub trait OptionExtensions { - /// Substitute for unstable [`Option::is_none_or`] - fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool; -} - -impl OptionExtensions for Option { - fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool { - match self { - Some(v) => f(v), - None => true, - } - } -} 
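The lint fixes above repeatedly apply the same three std-library migrations: hand-rolled ceiling division becomes `div_ceil`, manual underflow guards become `saturating_sub`, and the now-deleted `OptionExtensions::is_none_or_ex` helper gives way to `Option::is_none_or`, stabilized in Rust 1.82 (hence the MSRV bump in this commit). A minimal, self-contained sketch of the three idioms, with illustrative values only:

    fn main() {
        // Ceiling division: (len + cap - 1) / cap  ->  len.div_ceil(cap)
        let (len, cap) = (10u64, 4u64);
        assert_eq!((len + cap - 1) / cap, len.div_ceil(cap));

        // Underflow guard: if last > length { 0 } else { length - last }
        //                  ->  length.saturating_sub(last)
        let (length, last) = (5usize, 8usize);
        assert_eq!(length.saturating_sub(last), 0);

        // Custom Option helper -> std Option::is_none_or (Rust 1.82)
        let status: Option<u32> = None;
        assert!(status.is_none_or(|s| s > 0));
    }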
diff --git a/utils/src/serde_bytes/de.rs b/utils/src/serde_bytes/de.rs index 66064bfe33..6a634db0c7 100644 --- a/utils/src/serde_bytes/de.rs +++ b/utils/src/serde_bytes/de.rs @@ -29,7 +29,7 @@ pub struct FromHexVisitor<'de, T: FromHex> { lifetime: std::marker::PhantomData<&'de ()>, } -impl<'de, T: FromHex> Default for FromHexVisitor<'de, T> { +impl Default for FromHexVisitor<'_, T> { fn default() -> Self { Self { marker: Default::default(), lifetime: Default::default() } } diff --git a/wallet/bip32/src/mnemonic/bits.rs b/wallet/bip32/src/mnemonic/bits.rs index 5ed09af008..08ff65ef6d 100644 --- a/wallet/bip32/src/mnemonic/bits.rs +++ b/wallet/bip32/src/mnemonic/bits.rs @@ -56,7 +56,7 @@ impl Bits for u8 { } } -impl<'a> Bits for &'a u8 { +impl Bits for &'_ u8 { const SIZE: usize = 8; fn bits(self) -> u32 { diff --git a/wallet/bip32/src/xpublic_key.rs b/wallet/bip32/src/xpublic_key.rs index ac4eb720c9..ff9678cac1 100644 --- a/wallet/bip32/src/xpublic_key.rs +++ b/wallet/bip32/src/xpublic_key.rs @@ -10,7 +10,7 @@ use hmac::Mac; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -/// Extended public secp256k1 ECDSA verification key. +///// Extended public secp256k1 ECDSA verification key. //#[cfg(feature = "secp256k1")] //#[cfg_attr(docsrs, doc(cfg(feature = "secp256k1")))] //pub type XPub = ExtendedPublicKey; diff --git a/wallet/core/src/storage/local/mod.rs b/wallet/core/src/storage/local/mod.rs index 07e612a512..0c220b64ff 100644 --- a/wallet/core/src/storage/local/mod.rs +++ b/wallet/core/src/storage/local/mod.rs @@ -37,21 +37,30 @@ pub fn default_storage_folder() -> &'static str { // SAFETY: This operation is initializing a static mut variable, // however, the actual variable is accessible only through // this function. - unsafe { DEFAULT_STORAGE_FOLDER.get_or_insert("~/.kaspa".to_string()).as_str() } + #[allow(static_mut_refs)] + unsafe { + DEFAULT_STORAGE_FOLDER.get_or_insert("~/.kaspa".to_string()).as_str() + } } pub fn default_wallet_file() -> &'static str { // SAFETY: This operation is initializing a static mut variable, // however, the actual variable is accessible only through // this function. - unsafe { DEFAULT_WALLET_FILE.get_or_insert("kaspa".to_string()).as_str() } + #[allow(static_mut_refs)] + unsafe { + DEFAULT_WALLET_FILE.get_or_insert("kaspa".to_string()).as_str() + } } pub fn default_settings_file() -> &'static str { // SAFETY: This operation is initializing a static mut variable, // however, the actual variable is accessible only through // this function. 
- unsafe { DEFAULT_SETTINGS_FILE.get_or_insert("kaspa".to_string()).as_str() } + #[allow(static_mut_refs)] + unsafe { + DEFAULT_SETTINGS_FILE.get_or_insert("kaspa".to_string()).as_str() + } } /// Set a custom storage folder for the wallet SDK diff --git a/wallet/core/src/tx/generator/generator.rs b/wallet/core/src/tx/generator/generator.rs index 398ba1b4dc..d736bc73e1 100644 --- a/wallet/core/src/tx/generator/generator.rs +++ b/wallet/core/src/tx/generator/generator.rs @@ -592,7 +592,6 @@ impl Generator { } */ - fn generate_transaction_data(&self, context: &mut Context, stage: &mut Stage) -> Result<(DataKind, Data)> { let calc = &self.inner.mass_calculator; let mut data = Data::new(calc); diff --git a/wallet/core/src/tx/payment.rs b/wallet/core/src/tx/payment.rs index c164e0d789..350b8c98f4 100644 --- a/wallet/core/src/tx/payment.rs +++ b/wallet/core/src/tx/payment.rs @@ -72,7 +72,7 @@ pub struct PaymentOutput { impl TryCastFromJs for PaymentOutput { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { @@ -158,7 +158,7 @@ impl PaymentOutputs { impl TryCastFromJs for PaymentOutputs { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/core/src/wallet/mod.rs b/wallet/core/src/wallet/mod.rs index d7c9b6c76e..e9316a13b1 100644 --- a/wallet/core/src/wallet/mod.rs +++ b/wallet/core/src/wallet/mod.rs @@ -57,7 +57,7 @@ pub struct SingleWalletFileV1<'a, T: AsRef<[u8]>> { pub ecdsa: bool, } -impl<'a, T: AsRef<[u8]>> SingleWalletFileV1<'a, T> { +impl> SingleWalletFileV1<'_, T> { const NUM_THREADS: u32 = 8; } @@ -80,7 +80,7 @@ pub struct MultisigWalletFileV1<'a, T: AsRef<[u8]>> { pub ecdsa: bool, } -impl<'a, T: AsRef<[u8]>> MultisigWalletFileV1<'a, T> { +impl> MultisigWalletFileV1<'_, T> { const NUM_THREADS: u32 = 8; } diff --git a/wallet/core/src/wasm/cryptobox.rs b/wallet/core/src/wasm/cryptobox.rs index 957d4fc35a..76e9fdc3c0 100644 --- a/wallet/core/src/wasm/cryptobox.rs +++ b/wallet/core/src/wasm/cryptobox.rs @@ -35,7 +35,7 @@ impl CryptoBoxPrivateKey { impl TryCastFromJs for CryptoBoxPrivateKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result> + fn try_cast_from<'a, R>(value: &'a R) -> Result> where R: AsRef + 'a, { @@ -66,7 +66,7 @@ pub struct CryptoBoxPublicKey { impl TryCastFromJs for CryptoBoxPublicKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result> + fn try_cast_from<'a, R>(value: &'a R) -> Result> where R: AsRef + 'a, { diff --git a/wallet/core/src/wasm/utxo/context.rs b/wallet/core/src/wasm/utxo/context.rs index 3298a4829e..9c78fe6892 100644 --- a/wallet/core/src/wasm/utxo/context.rs +++ b/wallet/core/src/wasm/utxo/context.rs @@ -252,7 +252,7 @@ impl From for native::UtxoContext { impl TryCastFromJs for UtxoContext { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/core/src/wasm/utxo/processor.rs b/wallet/core/src/wasm/utxo/processor.rs index d68f10f763..ac089d4852 100644 --- a/wallet/core/src/wasm/utxo/processor.rs +++ b/wallet/core/src/wasm/utxo/processor.rs @@ -197,7 +197,7 @@ impl UtxoProcessor { impl TryCastFromJs for UtxoProcessor { type Error = workflow_wasm::error::Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn 
try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/keys/src/derivation_path.rs b/wallet/keys/src/derivation_path.rs index a5389ca37e..7867f886f5 100644 --- a/wallet/keys/src/derivation_path.rs +++ b/wallet/keys/src/derivation_path.rs @@ -57,7 +57,7 @@ impl DerivationPath { impl TryCastFromJs for DerivationPath { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/keys/src/keypair.rs b/wallet/keys/src/keypair.rs index 2cc3d57607..7c58d18bd3 100644 --- a/wallet/keys/src/keypair.rs +++ b/wallet/keys/src/keypair.rs @@ -104,7 +104,7 @@ impl Keypair { impl TryCastFromJs for Keypair { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/keys/src/privatekey.rs b/wallet/keys/src/privatekey.rs index 554bdf36e3..75911a3ff3 100644 --- a/wallet/keys/src/privatekey.rs +++ b/wallet/keys/src/privatekey.rs @@ -95,7 +95,7 @@ impl PrivateKey { impl TryCastFromJs for PrivateKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/keys/src/publickey.rs b/wallet/keys/src/publickey.rs index 235eb80804..f3c951ae20 100644 --- a/wallet/keys/src/publickey.rs +++ b/wallet/keys/src/publickey.rs @@ -155,7 +155,7 @@ extern "C" { impl TryCastFromJs for PublicKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/keys/src/xprv.rs b/wallet/keys/src/xprv.rs index c19e0b9cc8..a0ea428a06 100644 --- a/wallet/keys/src/xprv.rs +++ b/wallet/keys/src/xprv.rs @@ -146,7 +146,7 @@ extern "C" { impl TryCastFromJs for XPrv { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/keys/src/xpub.rs b/wallet/keys/src/xpub.rs index 8706f3fc91..64fc5f78ee 100644 --- a/wallet/keys/src/xpub.rs +++ b/wallet/keys/src/xpub.rs @@ -116,7 +116,7 @@ extern "C" { impl TryCastFromJs for XPub { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/wallet/pskt/src/bundle.rs b/wallet/pskt/src/bundle.rs index 6c926c6665..e08474d7a4 100644 --- a/wallet/pskt/src/bundle.rs +++ b/wallet/pskt/src/bundle.rs @@ -247,23 +247,18 @@ mod tests { use secp256k1::Secp256k1; use secp256k1::{rand::thread_rng, Keypair}; use std::str::FromStr; - use std::sync::Once; + use std::sync::LazyLock; - static INIT: Once = Once::new(); - static mut CONTEXT: Option)>> = None; + static CONTEXT: LazyLock)>> = LazyLock::new(|| { + let kps = [Keypair::new(&Secp256k1::new(), &mut thread_rng()), Keypair::new(&Secp256k1::new(), &mut thread_rng())]; + let redeem_script: Vec = + multisig_redeem_script(kps.iter().map(|pk| pk.x_only_public_key().0.serialize()), 2).expect("Test multisig redeem script"); - fn mock_context() -> &'static ([Keypair; 2], Vec) { - unsafe { - INIT.call_once(|| { - let kps = [Keypair::new(&Secp256k1::new(), &mut thread_rng()), Keypair::new(&Secp256k1::new(), &mut thread_rng())]; - let redeem_script: Vec = 
multisig_redeem_script(kps.iter().map(|pk| pk.x_only_public_key().0.serialize()), 2) - .expect("Test multisig redeem script"); - - CONTEXT = Some(Box::new((kps, redeem_script))); - }); + Box::new((kps, redeem_script)) + }); - CONTEXT.as_ref().unwrap() - } + fn mock_context() -> &'static ([Keypair; 2], Vec) { + CONTEXT.as_ref() } // Mock multisig PSKT from example diff --git a/wallet/pskt/src/wasm/pskt.rs b/wallet/pskt/src/wasm/pskt.rs index 8ee370a4b9..53c8a1cc3c 100644 --- a/wallet/pskt/src/wasm/pskt.rs +++ b/wallet/pskt/src/wasm/pskt.rs @@ -90,7 +90,7 @@ pub struct PSKT { impl TryCastFromJs for PSKT { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> where R: AsRef + 'a, { From c63dfc003a3c7d651b9ca022d786a9104c3a6edc Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 28 Nov 2024 22:58:10 +0200 Subject: [PATCH 30/31] IBD sync: recover sampled window (#598) * Search for the full consecutive window covering all sampled blocks * unrelated: reachability docs * Avoid searching for the cover if window is not sampled * cleanup WindowType::FullDifficultyWindow * rename * Fix cache origin issue and simplify cache management * prevent access to block window cache get w/o specifying origin * Suggested refactor for determining lock time type prior the call (to avoid leaking logic out of the TransactionValidator) * long due renames * renames and comments * move window "cover" logic into WindowManager * unrelated technical debt: make sure to run par_iter within the context of an existing thread pool (avoid creating a global thread pool if possible) --- consensus/benches/check_scripts.rs | 2 +- consensus/src/consensus/mod.rs | 9 +- consensus/src/model/services/reachability.rs | 21 ++ .../src/model/stores/block_window_cache.rs | 44 ++- .../body_validation_in_context.rs | 30 +- .../src/pipeline/body_processor/processor.rs | 4 - .../pipeline/header_processor/processor.rs | 2 +- .../pipeline/virtual_processor/processor.rs | 25 +- .../virtual_processor/utxo_validation.rs | 2 +- consensus/src/processes/pruning_proof/mod.rs | 10 +- .../src/processes/reachability/inquirer.rs | 12 +- .../processes/transaction_validator/mod.rs | 6 +- .../tx_validation_in_header_context.rs | 102 +++++++ .../tx_validation_in_isolation.rs | 5 + ...ed.rs => tx_validation_in_utxo_context.rs} | 0 .../tx_validation_not_utxo_related.rs | 53 ---- consensus/src/processes/window.rs | 270 +++++++++++++----- .../src/consensus_integration_tests.rs | 2 +- 18 files changed, 419 insertions(+), 180 deletions(-) create mode 100644 consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs rename consensus/src/processes/transaction_validator/{transaction_validator_populated.rs => tx_validation_in_utxo_context.rs} (100%) delete mode 100644 consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs index 5d13c43d8e..6462e04e49 100644 --- a/consensus/benches/check_scripts.rs +++ b/consensus/benches/check_scripts.rs @@ -1,6 +1,6 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode}; use kaspa_addresses::{Address, Prefix, Version}; -use kaspa_consensus::processes::transaction_validator::transaction_validator_populated::{ +use kaspa_consensus::processes::transaction_validator::tx_validation_in_utxo_context::{ check_scripts_par_iter, check_scripts_par_iter_pool, 
check_scripts_sequential, }; use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index eca78ee2a4..99719d4ac2 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -767,12 +767,13 @@ impl ConsensusApi for Consensus { let mut pruning_utxoset_write = self.pruning_utxoset_stores.write(); pruning_utxoset_write.utxo_set.write_many(utxoset_chunk).unwrap(); - // Parallelize processing - let inner_multiset = + // Parallelize processing using the context of an existing thread pool. + let inner_multiset = self.virtual_processor.install(|| { utxoset_chunk.par_iter().map(|(outpoint, entry)| MuHash::from_utxo(outpoint, entry)).reduce(MuHash::new, |mut a, b| { a.combine(&b); a - }); + }) + }); current_multiset.combine(&inner_multiset); } @@ -979,7 +980,7 @@ impl ConsensusApi for Consensus { Ok(self .services .window_manager - .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::DifficultyWindow) .unwrap() .deref() .iter() diff --git a/consensus/src/model/services/reachability.rs b/consensus/src/model/services/reachability.rs index 1c6282f8e5..a3aa83c7a4 100644 --- a/consensus/src/model/services/reachability.rs +++ b/consensus/src/model/services/reachability.rs @@ -9,14 +9,35 @@ use crate::processes::reachability::{inquirer, Result}; use kaspa_hashes::Hash; pub trait ReachabilityService { + /// Checks if `this` block is a chain ancestor of `queried` block (i.e., `this ∈ chain(queried) ∪ {queried}`). + /// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + + /// Result version of [`is_dag_ancestor_of`] (avoids unwrapping internally) fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result; + + /// Returns true if `this` is a DAG ancestor of `queried` (i.e., `queried ∈ future(this) ∪ {this}`). + /// Note: this method will return true if `this == queried`. + /// The complexity of this method is `O(log(|future_covering_set(this)|))` fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + + /// Checks if `this` is DAG ancestor of any of the blocks in `queried`. See [`is_dag_ancestor_of`] as well. fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator) -> bool; + + /// Checks if any of the blocks in `list` is DAG ancestor of `queried`. See [`is_dag_ancestor_of`] as well. fn is_any_dag_ancestor(&self, list: &mut impl Iterator, queried: Hash) -> bool; + + /// Result version of [`is_any_dag_ancestor`] (avoids unwrapping internally) fn is_any_dag_ancestor_result(&self, list: &mut impl Iterator, queried: Hash) -> Result; + + /// Finds the tree child of `ancestor` which is also a chain ancestor of `descendant`. 
+ /// (A "tree child of X" is a block which X is its chain parent) fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; + + /// Returns the chain parent of `this` fn get_chain_parent(&self, this: Hash) -> Hash; + + /// Checks whether `this` has reachability data fn has_reachability_data(&self, this: Hash) -> bool; } diff --git a/consensus/src/model/stores/block_window_cache.rs b/consensus/src/model/stores/block_window_cache.rs index 5fee0e1f84..2088cd2d18 100644 --- a/consensus/src/model/stores/block_window_cache.rs +++ b/consensus/src/model/stores/block_window_cache.rs @@ -1,6 +1,6 @@ use crate::processes::ghostdag::ordering::SortableBlock; use kaspa_consensus_core::BlockHasher; -use kaspa_database::prelude::Cache; +use kaspa_database::prelude::{Cache, CachePolicy}; use kaspa_hashes::Hash; use kaspa_utils::mem_size::MemSizeEstimator; use std::{ @@ -10,7 +10,7 @@ use std::{ sync::Arc, }; -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WindowOrigin { Full, Sampled, @@ -54,16 +54,46 @@ impl DerefMut for BlockWindowHeap { } } +/// A newtype wrapper over `[Cache]` meant to prevent erroneous reads of windows from different origins +#[derive(Clone)] +pub struct BlockWindowCacheStore { + inner: Cache, BlockHasher>, +} + +impl BlockWindowCacheStore { + pub fn new(policy: CachePolicy) -> Self { + Self { inner: Cache::new(policy) } + } + + pub fn contains_key(&self, key: &Hash) -> bool { + self.inner.contains_key(key) + } + + pub fn remove(&self, key: &Hash) -> Option> { + self.inner.remove(key) + } +} + /// Reader API for `BlockWindowCacheStore`. pub trait BlockWindowCacheReader { - fn get(&self, hash: &Hash) -> Option>; + /// Get the cache entry to this hash conditioned that *it matches the provided origin*. + /// We demand the origin to be provided in order to prevent reader errors. 
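+    /// (A mismatched origin simply yields `None`, which callers treat as a
+    /// cache miss, so a sampled-window consumer can never silently receive a
+    /// full window that was cached under the same hash.)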
+ fn get(&self, hash: &Hash, origin: WindowOrigin) -> Option>; } -pub type BlockWindowCacheStore = Cache, BlockHasher>; - impl BlockWindowCacheReader for BlockWindowCacheStore { #[inline(always)] - fn get(&self, hash: &Hash) -> Option> { - self.get(hash) + fn get(&self, hash: &Hash, origin: WindowOrigin) -> Option> { + self.inner.get(hash).and_then(|win| if win.origin() == origin { Some(win) } else { None }) + } +} + +pub trait BlockWindowCacheWriter { + fn insert(&self, hash: Hash, window: Arc); +} + +impl BlockWindowCacheWriter for BlockWindowCacheStore { + fn insert(&self, hash: Hash, window: Arc) { + self.inner.insert(hash, window); } } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 0eca78651c..08eb49f63b 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -1,12 +1,19 @@ use super::BlockBodyProcessor; use crate::{ errors::{BlockProcessResult, RuleError}, - model::stores::{ghostdag::GhostdagStoreReader, statuses::StatusesStoreReader}, - processes::window::WindowManager, + model::stores::statuses::StatusesStoreReader, + processes::{ + transaction_validator::{ + tx_validation_in_header_context::{LockTimeArg, LockTimeType}, + TransactionValidator, + }, + window::WindowManager, + }, }; use kaspa_consensus_core::block::Block; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; +use once_cell::unsync::Lazy; use std::sync::Arc; impl BlockBodyProcessor { @@ -17,18 +24,17 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - // Note: This is somewhat expensive during ibd, as it incurs cache misses. - - let pmt = { - let (pmt, pmt_window) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; - if !self.block_window_cache_for_past_median_time.contains_key(&block.hash()) { - self.block_window_cache_for_past_median_time.insert(block.hash(), pmt_window); - }; - pmt - }; + // Use lazy evaluation to avoid unnecessary work, as most of the time we expect the txs not to have lock time. 
+ let lazy_pmt_res = Lazy::new(|| self.window_manager.calc_past_median_time_for_known_hash(block.hash())); for tx in block.transactions.iter() { - if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { + let lock_time_arg = match TransactionValidator::get_lock_time_type(tx) { + LockTimeType::Finalized => LockTimeArg::Finalized, + LockTimeType::DaaScore => LockTimeArg::DaaScore(block.header.daa_score), + // We only evaluate the pmt calculation when actually needed + LockTimeType::Time => LockTimeArg::MedianTime((*lazy_pmt_res).clone()?), + }; + if let Err(e) = self.transaction_validator.validate_tx_in_header_context(tx, block.header.daa_score, lock_time_arg) { return Err(RuleError::TxInContextFailed(tx.id(), e)); }; } diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index ebb11a2003..7bad12ce3f 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -8,7 +8,6 @@ use crate::{ services::reachability::MTReachabilityService, stores::{ block_transactions::DbBlockTransactionsStore, - block_window_cache::BlockWindowCacheStore, ghostdag::DbGhostdagStore, headers::DbHeadersStore, reachability::DbReachabilityStore, @@ -67,7 +66,6 @@ pub struct BlockBodyProcessor { pub(super) headers_store: Arc, pub(super) block_transactions_store: Arc, pub(super) body_tips_store: Arc>, - pub(super) block_window_cache_for_past_median_time: Arc, // Managers and services pub(super) reachability_service: MTReachabilityService, @@ -93,7 +91,6 @@ pub struct BlockBodyProcessor { } impl BlockBodyProcessor { - #[allow(clippy::too_many_arguments)] pub fn new( receiver: Receiver, sender: Sender, @@ -122,7 +119,6 @@ impl BlockBodyProcessor { headers_store: storage.headers_store.clone(), block_transactions_store: storage.block_transactions_store.clone(), body_tips_store: storage.body_tips_store.clone(), - block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), reachability_service: services.reachability_service.clone(), coinbase_manager: services.coinbase_manager.clone(), diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 4ecc761af1..f467b6d975 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -10,7 +10,7 @@ use crate::{ model::{ services::reachability::MTReachabilityService, stores::{ - block_window_cache::{BlockWindowCacheStore, BlockWindowHeap}, + block_window_cache::{BlockWindowCacheStore, BlockWindowCacheWriter, BlockWindowHeap}, daa::DbDaaStore, depth::DbDepthStore, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 1f0c4ff38b..a8e1f7f2f4 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -16,7 +16,7 @@ use crate::{ stores::{ acceptance_data::{AcceptanceDataStoreReader, DbAcceptanceDataStore}, block_transactions::{BlockTransactionsStoreReader, DbBlockTransactionsStore}, - block_window_cache::BlockWindowCacheStore, + block_window_cache::{BlockWindowCacheStore, BlockWindowCacheWriter}, daa::DbDaaStore, depth::{DbDepthStore, DepthStoreReader}, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, @@ -43,7 +43,7 @@ use crate::{ 
processes::{ coinbase::CoinbaseManager, ghostdag::ordering::SortableBlock, - transaction_validator::{errors::TxResult, transaction_validator_populated::TxValidationFlags, TransactionValidator}, + transaction_validator::{errors::TxResult, tx_validation_in_utxo_context::TxValidationFlags, TransactionValidator}, window::WindowManager, }, }; @@ -807,7 +807,11 @@ impl VirtualStateProcessor { args: &TransactionValidationArgs, ) -> TxResult<()> { self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; - self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; + self.transaction_validator.validate_tx_in_header_context_with_args( + &mutable_tx.tx, + virtual_daa_score, + virtual_past_median_time, + )?; self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score, args)?; Ok(()) } @@ -896,7 +900,11 @@ impl VirtualStateProcessor { // No need to validate the transaction in isolation since we rely on the mining manager to submit transactions // which were previously validated through `validate_mempool_transaction_and_populate`, hence we only perform // in-context validations - self.transaction_validator.utxo_free_tx_validation(tx, virtual_state.daa_score, virtual_state.past_median_time)?; + self.transaction_validator.validate_tx_in_header_context_with_args( + tx, + virtual_state.daa_score, + virtual_state.past_median_time, + )?; let ValidatedTransaction { calculated_fee, .. } = self.validate_transaction_in_utxo_context(tx, utxo_view, virtual_state.daa_score, TxValidationFlags::Full)?; Ok(calculated_fee) @@ -1202,6 +1210,15 @@ impl VirtualStateProcessor { true } } + + /// Executes `op` within the thread pool associated with this processor. + pub fn install(&self, op: OP) -> R + where + OP: FnOnce() -> R + Send, + R: Send, + { + self.thread_pool.install(op) + } } enum MergesetIncreaseResult { diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 4a62a4ae8e..2e9c7ddb4a 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -7,7 +7,7 @@ use crate::{ model::stores::{block_transactions::BlockTransactionsStoreReader, daa::DaaStoreReader, ghostdag::GhostdagData}, processes::transaction_validator::{ errors::{TxResult, TxRuleError}, - transaction_validator_populated::TxValidationFlags, + tx_validation_in_utxo_context::TxValidationFlags, }, }; use kaspa_consensus_core::{ diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 2b3ba5f9d8..a9412bbf60 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -7,7 +7,6 @@ use std::{ hash_map::Entry::{self}, VecDeque, }, - ops::Deref, sync::{atomic::AtomicBool, Arc}, }; @@ -279,12 +278,11 @@ impl PruningProofManager { // PRUNE SAFETY: called either via consensus under the prune guard or by the pruning processor (hence no pruning in parallel) for anticone_block in anticone.iter().copied() { - let window = self - .window_manager - .block_window(&self.ghostdag_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) - .unwrap(); + let ghostdag = self.ghostdag_store.get_data(anticone_block).unwrap(); + let window = self.window_manager.block_window(&ghostdag, WindowType::DifficultyWindow).unwrap(); + let cover = self.window_manager.consecutive_cover_for_window(ghostdag, 
&window); - for hash in window.deref().iter().map(|block| block.0.hash) { + for hash in cover { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), diff --git a/consensus/src/processes/reachability/inquirer.rs b/consensus/src/processes/reachability/inquirer.rs index ff09849b4a..3c1b153de3 100644 --- a/consensus/src/processes/reachability/inquirer.rs +++ b/consensus/src/processes/reachability/inquirer.rs @@ -156,21 +156,21 @@ pub fn hint_virtual_selected_parent(store: &mut (impl ReachabilityStore + ?Sized ) } -/// Checks if the `this` block is a strict chain ancestor of the `queried` block (aka `this ∈ chain(queried)`). +/// Checks if the `this` block is a strict chain ancestor of the `queried` block (i.e., `this ∈ chain(queried)`). /// Note that this results in `false` if `this == queried` pub fn is_strict_chain_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: Hash, queried: Hash) -> Result { Ok(store.get_interval(this)?.strictly_contains(store.get_interval(queried)?)) } -/// Checks if `this` block is a chain ancestor of `queried` block (aka `this ∈ chain(queried) ∪ {queried}`). +/// Checks if `this` block is a chain ancestor of `queried` block (i.e., `this ∈ chain(queried) ∪ {queried}`). /// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. pub fn is_chain_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: Hash, queried: Hash) -> Result { Ok(store.get_interval(this)?.contains(store.get_interval(queried)?)) } -/// Returns true if `this` is a DAG ancestor of `queried` (aka `queried ∈ future(this) ∪ {this}`). +/// Returns true if `this` is a DAG ancestor of `queried` (i.e., `queried ∈ future(this) ∪ {this}`). /// Note: this method will return true if `this == queried`. -/// The complexity of this method is O(log(|future_covering_set(this)|)) +/// The complexity of this method is `O(log(|future_covering_set(this)|))` pub fn is_dag_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: Hash, queried: Hash) -> Result { // First, check if `this` is a chain ancestor of queried if is_chain_ancestor_of(store, this, queried)? { @@ -184,7 +184,7 @@ pub fn is_dag_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: } } -/// Finds the child of `ancestor` which is also a chain ancestor of `descendant`. +/// Finds the tree child of `ancestor` which is also a chain ancestor of `descendant`. pub fn get_next_chain_ancestor(store: &(impl ReachabilityStoreReader + ?Sized), descendant: Hash, ancestor: Hash) -> Result { if descendant == ancestor { // The next ancestor does not exist @@ -200,7 +200,7 @@ pub fn get_next_chain_ancestor(store: &(impl ReachabilityStoreReader + ?Sized), } /// Note: it is important to keep the unchecked version for internal module use, -/// since in some scenarios during reindexing `descendant` might have a modified +/// since in some scenarios during reindexing `ancestor` might have a modified /// interval which was not propagated yet. 
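The ancestry queries documented above all reduce to interval containment; a minimal sketch of that reduction (the semantics follow the doc comments and the `contains`/`strictly_contains` calls shown in the diff, while the exact field layout here is an assumption):

#[derive(Clone, Copy)]
struct Interval { start: u64, end: u64 }

impl Interval {
    // `this ∈ chain(queried) ∪ {queried}` iff this.interval contains queried.interval
    fn contains(&self, other: Interval) -> bool {
        self.start <= other.start && other.end <= self.end
    }
    // Strict variant: excludes the case `this == queried`
    fn strictly_contains(&self, other: Interval) -> bool {
        self.start <= other.start && other.end < self.end
    }
}

fn main() {
    let ancestor = Interval { start: 1, end: 100 };
    let descendant = Interval { start: 40, end: 60 };
    assert!(ancestor.contains(descendant));
    assert!(ancestor.strictly_contains(descendant));
    // A block is a chain ancestor of itself, but not strictly
    assert!(ancestor.contains(ancestor));
    assert!(!ancestor.strictly_contains(ancestor));
}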
pub(super) fn get_next_chain_ancestor_unchecked( store: &(impl ReachabilityStoreReader + ?Sized), diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 3f091dfd76..b4a946c2ff 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -1,7 +1,7 @@ pub mod errors; -pub mod transaction_validator_populated; -mod tx_validation_in_isolation; -pub mod tx_validation_not_utxo_related; +pub mod tx_validation_in_header_context; +pub mod tx_validation_in_isolation; +pub mod tx_validation_in_utxo_context; use std::sync::Arc; use crate::model::stores::ghostdag; diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs new file mode 100644 index 0000000000..129627c59d --- /dev/null +++ b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs @@ -0,0 +1,102 @@ +//! Groups transaction validations that depend on the containing header and/or +//! its past headers (but do not depend on UTXO state or other transactions in +//! the containing block) + +use super::{ + errors::{TxResult, TxRuleError}, + TransactionValidator, +}; +use crate::constants::LOCK_TIME_THRESHOLD; +use kaspa_consensus_core::tx::Transaction; + +pub(crate) enum LockTimeType { + Finalized, + DaaScore, + Time, +} + +pub(crate) enum LockTimeArg { + Finalized, + DaaScore(u64), + MedianTime(u64), +} + +impl TransactionValidator { + pub(crate) fn validate_tx_in_header_context_with_args( + &self, + tx: &Transaction, + ctx_daa_score: u64, + ctx_block_time: u64, + ) -> TxResult<()> { + self.validate_tx_in_header_context( + tx, + ctx_daa_score, + match Self::get_lock_time_type(tx) { + LockTimeType::Finalized => LockTimeArg::Finalized, + LockTimeType::DaaScore => LockTimeArg::DaaScore(ctx_daa_score), + LockTimeType::Time => LockTimeArg::MedianTime(ctx_block_time), + }, + ) + } + + pub(crate) fn validate_tx_in_header_context( + &self, + tx: &Transaction, + ctx_daa_score: u64, + lock_time_arg: LockTimeArg, + ) -> TxResult<()> { + self.check_transaction_payload(tx, ctx_daa_score)?; + self.check_tx_is_finalized(tx, lock_time_arg) + } + + pub(crate) fn get_lock_time_type(tx: &Transaction) -> LockTimeType { + match tx.lock_time { + // Lock time of zero means the transaction is finalized. + 0 => LockTimeType::Finalized, + + // The lock time field of a transaction is either a block DAA score at + // which the transaction is finalized or a timestamp, depending on whether the + // value is below the LOCK_TIME_THRESHOLD. When it is under the + // threshold it is a DAA score, + t if t < LOCK_TIME_THRESHOLD => LockTimeType::DaaScore, + + // ...and when at or above the threshold it represents time + _t => LockTimeType::Time, + } + } + + fn check_tx_is_finalized(&self, tx: &Transaction, lock_time_arg: LockTimeArg) -> TxResult<()> { + let block_time_or_daa_score = match lock_time_arg { + LockTimeArg::Finalized => return Ok(()), + LockTimeArg::DaaScore(ctx_daa_score) => ctx_daa_score, + LockTimeArg::MedianTime(ctx_block_time) => ctx_block_time, + }; + + if tx.lock_time < block_time_or_daa_score { + return Ok(()); + } + + // At this point, the transaction's lock time hasn't occurred yet, but + // the transaction might still be finalized if the sequence number + // for all transaction inputs is maxed out. 
+ for (i, input) in tx.inputs.iter().enumerate() { + if input.sequence != u64::MAX { + return Err(TxRuleError::NotFinalized(i)); + } + } + + Ok(()) + } + + fn check_transaction_payload(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { + // TODO (post HF): move back to in isolation validation + if self.payload_activation.is_active(ctx_daa_score) { + Ok(()) + } else { + if !tx.is_coinbase() && !tx.payload.is_empty() { + return Err(TxRuleError::NonCoinbaseTxHasPayload); + } + Ok(()) + } + } +} diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index a08b83d94e..b509a71c72 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -8,6 +8,11 @@ use super::{ }; impl TransactionValidator { + /// Performs a variety of transaction validation checks which are independent of any + /// context -- header or UTXO. **Note** that any check performed here should be moved to + /// header contextual validation if it becomes HF activation dependent. This is because we rely + /// on checks here being truly independent, and we avoid calling them multiple times wherever possible + /// (e.g., BBT relies on mempool in-isolation checks even though the virtual DAA score might have changed) pub fn validate_tx_in_isolation(&self, tx: &Transaction) -> TxResult<()> { self.check_transaction_inputs_in_isolation(tx)?; self.check_transaction_outputs_in_isolation(tx)?; diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs similarity index 100% rename from consensus/src/processes/transaction_validator/transaction_validator_populated.rs rename to consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs diff --git a/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs b/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs deleted file mode 100644 index 3a854948ac..0000000000 --- a/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs +++ /dev/null @@ -1,53 +0,0 @@ -use kaspa_consensus_core::tx::Transaction; - -use crate::constants::LOCK_TIME_THRESHOLD; - -use super::{ - errors::{TxResult, TxRuleError}, - TransactionValidator, -}; - -impl TransactionValidator { - pub fn utxo_free_tx_validation(&self, tx: &Transaction, ctx_daa_score: u64, ctx_block_time: u64) -> TxResult<()> { - self.check_transaction_payload(tx, ctx_daa_score)?; - self.check_tx_is_finalized(tx, ctx_daa_score, ctx_block_time) - } - - fn check_tx_is_finalized(&self, tx: &Transaction, ctx_daa_score: u64, ctx_block_time: u64) -> TxResult<()> { - // Lock time of zero means the transaction is finalized. - if tx.lock_time == 0 { - return Ok(()); - } - - // The lock time field of a transaction is either a block DAA score at - // which the transaction is finalized or a timestamp depending on if the - // value is before the LOCK_TIME_THRESHOLD. When it is under the - // threshold it is a DAA score. 
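The finalization rule implemented above (and in the deleted module being replaced), restated on plain values; the threshold constant here is an illustrative stand-in for the consensus LOCK_TIME_THRESHOLD:

const LOCK_TIME_THRESHOLD: u64 = 500_000_000_000; // illustrative value

fn is_finalized(lock_time: u64, ctx_daa_score: u64, ctx_median_time: u64, sequences: &[u64]) -> bool {
    if lock_time == 0 {
        return true; // zero lock time: finalized by definition
    }
    // Below the threshold the field is a DAA score, at or above it a timestamp
    let reference = if lock_time < LOCK_TIME_THRESHOLD { ctx_daa_score } else { ctx_median_time };
    if lock_time < reference {
        return true; // the lock has already expired
    }
    // Lock not reached yet: still finalized if every input maxed out its sequence
    sequences.iter().all(|&s| s == u64::MAX)
}

fn main() {
    assert!(is_finalized(0, 10, 0, &[0]));
    assert!(is_finalized(5, 10, 0, &[0]));          // DAA-score lock already passed
    assert!(!is_finalized(20, 10, 0, &[0]));        // DAA-score lock still pending
    assert!(is_finalized(20, 10, 0, &[u64::MAX]));  // all sequences opted out
}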
- let block_time_or_daa_score = if tx.lock_time < LOCK_TIME_THRESHOLD { ctx_daa_score } else { ctx_block_time }; - if tx.lock_time < block_time_or_daa_score { - return Ok(()); - } - - // At this point, the transaction's lock time hasn't occurred yet, but - // the transaction might still be finalized if the sequence number - // for all transaction inputs is maxed out. - for (i, input) in tx.inputs.iter().enumerate() { - if input.sequence != u64::MAX { - return Err(TxRuleError::NotFinalized(i)); - } - } - - Ok(()) - } - - fn check_transaction_payload(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { - if self.payload_activation.is_active(ctx_daa_score) { - Ok(()) - } else { - if !tx.is_coinbase() && !tx.payload.is_empty() { - return Err(TxRuleError::NonCoinbaseTxHasPayload); - } - Ok(()) - } - } -} diff --git a/consensus/src/processes/window.rs b/consensus/src/processes/window.rs index ab09b1e7cb..1caff9c007 100644 --- a/consensus/src/processes/window.rs +++ b/consensus/src/processes/window.rs @@ -1,6 +1,6 @@ use crate::{ model::stores::{ - block_window_cache::{BlockWindowCacheReader, BlockWindowHeap, WindowOrigin}, + block_window_cache::{BlockWindowCacheReader, BlockWindowCacheWriter, BlockWindowHeap, WindowOrigin}, daa::DaaStoreReader, ghostdag::{GhostdagData, GhostdagStoreReader}, headers::HeaderStoreReader, @@ -31,9 +31,8 @@ use super::{ #[derive(Clone, Copy)] pub enum WindowType { - SampledDifficultyWindow, - FullDifficultyWindow, - SampledMedianTimeWindow, + DifficultyWindow, + MedianTimeWindow, VaryingWindow(usize), } @@ -55,15 +54,44 @@ pub trait WindowManager { fn block_daa_window(&self, ghostdag_data: &GhostdagData) -> Result; fn calculate_difficulty_bits(&self, ghostdag_data: &GhostdagData, daa_window: &DaaWindow) -> u32; fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc), RuleError>; + fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result; fn estimate_network_hashes_per_second(&self, window: Arc) -> DifficultyResult; fn window_size(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> usize; fn sample_rate(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> u64; + + /// Returns the full consecutive sub-DAG containing all blocks required to restore the (possibly sampled) window. + fn consecutive_cover_for_window(&self, ghostdag_data: Arc, window: &BlockWindowHeap) -> Vec; +} + +trait AffiliatedWindowCacheReader { + fn get(&self, hash: &Hash) -> Option>; +} + +/// A local wrapper over an (optional) block window cache which filters cache hits based on a pre-specified window origin +struct AffiliatedWindowCache<'a, U: BlockWindowCacheReader> { + /// The inner underlying cache + inner: Option<&'a Arc>, + /// The affiliated origin (sampled vs. 
full) + origin: WindowOrigin, +} + +impl<'a, U: BlockWindowCacheReader> AffiliatedWindowCache<'a, U> { + fn new(inner: Option<&'a Arc>, origin: WindowOrigin) -> Self { + Self { inner, origin } + } +} + +impl AffiliatedWindowCacheReader for AffiliatedWindowCache<'_, U> { + fn get(&self, hash: &Hash) -> Option> { + // Only return the cached window if it originates from the affiliated origin + self.inner.and_then(|cache| cache.get(hash, self.origin)) + } } /// A window manager conforming (indirectly) to the legacy golang implementation /// based on full, hence un-sampled, windows #[derive(Clone)] -pub struct FullWindowManager { +pub struct FullWindowManager { genesis_hash: Hash, ghostdag_store: Arc, block_window_cache_for_difficulty: Arc, @@ -74,7 +102,7 @@ pub struct FullWindowManager, } -impl FullWindowManager { +impl FullWindowManager { pub fn new( genesis: &GenesisBlock, ghostdag_store: Arc, @@ -114,30 +142,29 @@ impl Fu return Ok(Arc::new(BlockWindowHeap::new(WindowOrigin::Full))); } - let cache = if window_size == self.difficulty_window_size { + let inner_cache = if window_size == self.difficulty_window_size { Some(&self.block_window_cache_for_difficulty) } else if window_size == self.past_median_time_window_size { Some(&self.block_window_cache_for_past_median_time) } else { None }; - - if let Some(cache) = cache { - if let Some(selected_parent_binary_heap) = cache.get(&ghostdag_data.selected_parent) { - // Only use the cached window if it originates from here - if let WindowOrigin::Full = selected_parent_binary_heap.origin() { - let mut window_heap = BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_binary_heap).clone()); - if ghostdag_data.selected_parent != self.genesis_hash { - self.try_push_mergeset( - &mut window_heap, - ghostdag_data, - self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(), - ); - } - - return Ok(Arc::new(window_heap.binary_heap)); - } + // Wrap the inner cache with a cache affiliated with this origin (WindowOrigin::Full). 
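A reduced sketch of the origin filter introduced by `AffiliatedWindowCache` above: a cache hit is honored only when the stored window was produced by the same mechanism (origin) the caller expects. Plain `u64` hashes and a `HashMap` stand in for the real stores here.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq)]
enum WindowOrigin { Full, Sampled }

struct TaggedCache {
    inner: HashMap<u64, (WindowOrigin, Vec<u64>)>,
}

impl TaggedCache {
    fn get(&self, hash: &u64, origin: WindowOrigin) -> Option<&Vec<u64>> {
        // A hit from the "wrong" origin is treated as a miss
        self.inner.get(hash).and_then(|(o, w)| (*o == origin).then_some(w))
    }
}

fn main() {
    let mut inner = HashMap::new();
    inner.insert(1u64, (WindowOrigin::Full, vec![10, 20, 30]));
    let cache = TaggedCache { inner };
    assert!(cache.get(&1, WindowOrigin::Full).is_some());
    assert!(cache.get(&1, WindowOrigin::Sampled).is_none()); // filtered out
}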
+ // This is crucial for hardfork times where the DAA mechanism changes thereby invalidating cache entries + // originating from the prior mechanism + let cache = AffiliatedWindowCache::new(inner_cache, WindowOrigin::Full); + + if let Some(selected_parent_binary_heap) = cache.get(&ghostdag_data.selected_parent) { + let mut window_heap = BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_binary_heap).clone()); + if ghostdag_data.selected_parent != self.genesis_hash { + self.try_push_mergeset( + &mut window_heap, + ghostdag_data, + self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(), + ); } + + return Ok(Arc::new(window_heap.binary_heap)); } let mut window_heap = BoundedSizeBlockHeap::new(WindowOrigin::Full, window_size); @@ -194,7 +221,9 @@ impl Fu } } -impl WindowManager for FullWindowManager { +impl WindowManager + for FullWindowManager +{ fn block_window(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> Result, RuleError> { self.build_block_window(ghostdag_data, window_type) } @@ -206,7 +235,7 @@ impl Wi } fn block_daa_window(&self, ghostdag_data: &GhostdagData) -> Result { - let window = self.block_window(ghostdag_data, WindowType::SampledDifficultyWindow)?; + let window = self.block_window(ghostdag_data, WindowType::DifficultyWindow)?; Ok(self.calc_daa_window(ghostdag_data, window)) } @@ -215,19 +244,31 @@ impl Wi } fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc), RuleError> { - let window = self.block_window(ghostdag_data, WindowType::SampledMedianTimeWindow)?; + let window = self.block_window(ghostdag_data, WindowType::MedianTimeWindow)?; let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; Ok((past_median_time, window)) } + fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result { + if let Some(window) = self.block_window_cache_for_past_median_time.get(&hash, WindowOrigin::Full) { + let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + Ok(past_median_time) + } else { + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); + let (past_median_time, window) = self.calc_past_median_time(&ghostdag_data)?; + self.block_window_cache_for_past_median_time.insert(hash, window); + Ok(past_median_time) + } + } + fn estimate_network_hashes_per_second(&self, window: Arc) -> DifficultyResult { self.difficulty_manager.estimate_network_hashes_per_second(&window) } fn window_size(&self, _ghostdag_data: &GhostdagData, window_type: WindowType) -> usize { match window_type { - WindowType::SampledDifficultyWindow | WindowType::FullDifficultyWindow => self.difficulty_window_size, - WindowType::SampledMedianTimeWindow => self.past_median_time_window_size, + WindowType::DifficultyWindow => self.difficulty_window_size, + WindowType::MedianTimeWindow => self.past_median_time_window_size, WindowType::VaryingWindow(size) => size, } } @@ -235,6 +276,11 @@ impl Wi fn sample_rate(&self, _ghostdag_data: &GhostdagData, _window_type: WindowType) -> u64 { 1 } + + fn consecutive_cover_for_window(&self, _ghostdag_data: Arc, window: &BlockWindowHeap) -> Vec { + assert_eq!(WindowOrigin::Full, window.origin()); + window.iter().map(|b| b.0.hash).collect() + } } type DaaStatus = Option<(u64, BlockHashSet)>; @@ -246,7 +292,12 @@ enum SampledBlock { /// A sampled window manager implementing [KIP-0004](https://github.com/kaspanet/kips/blob/master/kip-0004.md) #[derive(Clone)] -pub struct SampledWindowManager { +pub struct SampledWindowManager< + T: 
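The new `calc_past_median_time_for_known_hash` above follows a get-or-compute-and-store shape; the same shape with a plain `HashMap` standing in for the block window cache and a trivial stand-in computation:

use std::collections::HashMap;

fn past_median_time_for_known_hash(cache: &mut HashMap<u64, u64>, hash: u64) -> u64 {
    if let Some(&cached) = cache.get(&hash) {
        cached // cache hit: reuse the previously built window
    } else {
        let computed = hash.wrapping_mul(2); // stand-in for the expensive window build
        cache.insert(hash, computed); // store so future callers hit the cache
        computed
    }
}

fn main() {
    let mut cache = HashMap::new();
    assert_eq!(past_median_time_for_known_hash(&mut cache, 21), 42); // computed once
    assert_eq!(past_median_time_for_known_hash(&mut cache, 21), 42); // served from cache
}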
GhostdagStoreReader, + U: BlockWindowCacheReader + BlockWindowCacheWriter, + V: HeaderStoreReader, + W: DaaStoreReader, +> { genesis_hash: Hash, ghostdag_store: Arc, headers_store: Arc, @@ -263,7 +314,9 @@ pub struct SampledWindowManager, } -impl SampledWindowManager { +impl + SampledWindowManager +{ #[allow(clippy::too_many_arguments)] pub fn new( genesis: &GenesisBlock, @@ -331,11 +384,15 @@ impl Some(&self.block_window_cache_for_difficulty), - WindowType::SampledMedianTimeWindow => Some(&self.block_window_cache_for_past_median_time), - WindowType::FullDifficultyWindow | WindowType::VaryingWindow(_) => None, + let inner_cache = match window_type { + WindowType::DifficultyWindow => Some(&self.block_window_cache_for_difficulty), + WindowType::MedianTimeWindow => Some(&self.block_window_cache_for_past_median_time), + WindowType::VaryingWindow(_) => None, }; + // Wrap the inner cache with a cache affiliated with this origin (WindowOrigin::Sampled). + // This is crucial for hardfork times where the DAA mechanism changes thereby invalidating cache entries + // originating from the prior mechanism + let cache = AffiliatedWindowCache::new(inner_cache, WindowOrigin::Sampled); let selected_parent_blue_work = self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(); @@ -343,7 +400,7 @@ impl); // see if we can inherit and merge with the selected parent cache - if self.try_merge_with_selected_parent_cache(&mut window_heap, cache, ¤t_ghostdag.selected_parent) { + if self.try_merge_with_selected_parent_cache(&mut window_heap, &cache, ¤t_ghostdag.selected_parent) { // if successful, we may break out of the loop, with the window already filled. break; }; @@ -439,36 +496,33 @@ impl>, + cache: &impl AffiliatedWindowCacheReader, ghostdag_data: &GhostdagData, selected_parent_blue_work: BlueWorkType, mergeset_non_daa_inserter: Option, ) -> Option> { - cache.and_then(|cache| { - cache.get(&ghostdag_data.selected_parent).map(|selected_parent_window| { - let mut heap = Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_window).clone())); - // We pass a Lazy heap as an optimization to avoid cloning the selected parent heap in cases where the mergeset contains no samples - self.push_mergeset(&mut heap, sample_rate, ghostdag_data, selected_parent_blue_work, mergeset_non_daa_inserter); - if let Ok(heap) = Lazy::into_value(heap) { - Arc::new(heap.binary_heap) - } else { - selected_parent_window.clone() - } - }) + cache.get(&ghostdag_data.selected_parent).map(|selected_parent_window| { + let mut heap = Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_window).clone())); + // We pass a Lazy heap as an optimization to avoid cloning the selected parent heap in cases where the mergeset contains no samples + self.push_mergeset(&mut heap, sample_rate, ghostdag_data, selected_parent_blue_work, mergeset_non_daa_inserter); + if let Ok(heap) = Lazy::into_value(heap) { + Arc::new(heap.binary_heap) + } else { + selected_parent_window.clone() + } }) } fn try_merge_with_selected_parent_cache( &self, heap: &mut BoundedSizeBlockHeap, - cache: Option<&Arc>, + cache: &impl AffiliatedWindowCacheReader, selected_parent: &Hash, ) -> bool { cache - .and_then(|cache| { - cache.get(selected_parent).map(|selected_parent_window| { - heap.merge_ancestor_heap(&mut (*selected_parent_window).clone()); - }) + .get(selected_parent) + .map(|selected_parent_window| { + heap.merge_ancestor_heap(&mut (*selected_parent_window).clone()); }) .is_some() } @@ -501,7 +555,7 @@ 
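The `Lazy` heap trick in `try_init_from_cache` above, isolated: the clone of the cached selected-parent window happens only if the mergeset actually forces it, and `Lazy::into_value` reveals whether it ever did. A sketch assuming `once_cell`, with a `Vec` in place of the bounded heap:

use once_cell::unsync::Lazy;

fn main() {
    let cached_window: Vec<u64> = (0..1_000).collect();
    let mergeset_samples: Vec<u64> = vec![]; // no samples, so no clone is needed

    let mut heap = Lazy::new(|| cached_window.clone()); // clone deferred
    for s in &mergeset_samples {
        heap.push(*s); // the first push would force the clone
    }
    match Lazy::into_value(heap) {
        Ok(owned) => println!("cloned and extended, len = {}", owned.len()),
        Err(_) => println!("no samples: the cached window is reused as-is"),
    }
}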
impl WindowManager +impl WindowManager for SampledWindowManager { fn block_window(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> Result, RuleError> { @@ -516,7 +570,7 @@ impl Result { let mut mergeset_non_daa = BlockHashSet::default(); - let window = self.build_block_window(ghostdag_data, WindowType::SampledDifficultyWindow, |hash| { + let window = self.build_block_window(ghostdag_data, WindowType::DifficultyWindow, |hash| { mergeset_non_daa.insert(hash); })?; let daa_score = self.difficulty_manager.calc_daa_score(ghostdag_data, &mergeset_non_daa); @@ -528,33 +582,73 @@ impl Result<(u64, Arc), RuleError> { - let window = self.block_window(ghostdag_data, WindowType::SampledMedianTimeWindow)?; + let window = self.block_window(ghostdag_data, WindowType::MedianTimeWindow)?; let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; Ok((past_median_time, window)) } + fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result { + if let Some(window) = self.block_window_cache_for_past_median_time.get(&hash, WindowOrigin::Sampled) { + let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + Ok(past_median_time) + } else { + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); + let (past_median_time, window) = self.calc_past_median_time(&ghostdag_data)?; + self.block_window_cache_for_past_median_time.insert(hash, window); + Ok(past_median_time) + } + } + fn estimate_network_hashes_per_second(&self, window: Arc) -> DifficultyResult { self.difficulty_manager.estimate_network_hashes_per_second(&window) } fn window_size(&self, _ghostdag_data: &GhostdagData, window_type: WindowType) -> usize { match window_type { - WindowType::SampledDifficultyWindow => self.difficulty_window_size, - // We aim to return a full window such that it contains what would be the sampled window. Note that the - // product below addresses also the worst-case scenario where the last sampled block is exactly `sample_rate` - // blocks from the end of the full window - WindowType::FullDifficultyWindow => self.difficulty_window_size * self.difficulty_sample_rate as usize, - WindowType::SampledMedianTimeWindow => self.past_median_time_window_size, + WindowType::DifficultyWindow => self.difficulty_window_size, + WindowType::MedianTimeWindow => self.past_median_time_window_size, WindowType::VaryingWindow(size) => size, } } fn sample_rate(&self, _ghostdag_data: &GhostdagData, window_type: WindowType) -> u64 { match window_type { - WindowType::SampledDifficultyWindow => self.difficulty_sample_rate, - WindowType::SampledMedianTimeWindow => self.past_median_time_sample_rate, - WindowType::FullDifficultyWindow | WindowType::VaryingWindow(_) => 1, + WindowType::DifficultyWindow => self.difficulty_sample_rate, + WindowType::MedianTimeWindow => self.past_median_time_sample_rate, + WindowType::VaryingWindow(_) => 1, + } + } + + fn consecutive_cover_for_window(&self, mut ghostdag: Arc, window: &BlockWindowHeap) -> Vec { + assert_eq!(WindowOrigin::Sampled, window.origin()); + + // In the sampled case, the sampling logic relies on DAA indexes which can only be calculated correctly if the full + // mergesets covering all sampled blocks are sent. 
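A toy version of the cover construction that follows: walk back through selected parents, taking whole mergesets, until every sampled window block is covered. Indices stand in for hashes and for the ghostdag store lookups.

use std::collections::HashSet;

struct Node { mergeset: Vec<u64>, selected_parent: Option<usize> }

fn consecutive_cover(chain: &[Node], tip: usize, window: &HashSet<u64>) -> Vec<u64> {
    let mut unvisited = window.clone();
    let mut cover = Vec::new();
    let mut cur = Some(tip);
    while !unvisited.is_empty() {
        let idx = cur.expect("window blocks must be covered before running out of ancestors");
        for &merged in &chain[idx].mergeset {
            cover.push(merged);
            unvisited.remove(&merged);
        }
        cur = chain[idx].selected_parent;
    }
    cover
}

fn main() {
    let chain = vec![
        Node { mergeset: vec![1, 2], selected_parent: None },    // older chain block
        Node { mergeset: vec![3, 4], selected_parent: Some(0) }, // tip
    ];
    let window: HashSet<u64> = [2, 4].into_iter().collect();
    assert_eq!(consecutive_cover(&chain, 1, &window), vec![3, 4, 1, 2]);
}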
+ + // Tracks the window blocks to make sure we visit all blocks + let mut unvisited: BlockHashSet = window.iter().map(|b| b.0.hash).collect(); + let capacity_estimate = window.len() * self.difficulty_sample_rate as usize; + // The full consecutive window covering all sampled window blocks and the full mergesets containing them + let mut cover = Vec::with_capacity(capacity_estimate); + while !unvisited.is_empty() { + assert!(!ghostdag.selected_parent.is_origin(), "unvisited still not empty"); + // TODO (relaxed): a possible optimization here is to iterate in the same order as + // sampled_mergeset_iterator (descending_mergeset) and to break once all samples from + // this mergeset are reached. + // * Why is this sufficient? Because we still send the prefix of the mergeset required for + // obtaining the DAA index for all sampled blocks. + // * What's the benefit? This might exclude deeply merged blocks, which in turn will help + // reduce the number of trusted blocks sent to a fresh syncing peer. + for merged in ghostdag.unordered_mergeset() { + cover.push(merged); + unvisited.remove(&merged); + } + if unvisited.is_empty() { + break; + } + ghostdag = self.ghostdag_store.get_data(ghostdag.selected_parent).unwrap(); } + cover } } @@ -562,7 +656,12 @@ impl { +pub struct DualWindowManager< + T: GhostdagStoreReader, + U: BlockWindowCacheReader + BlockWindowCacheWriter, + V: HeaderStoreReader, + W: DaaStoreReader, +> { ghostdag_store: Arc, headers_store: Arc, sampling_activation: ForkActivation, @@ -570,7 +669,9 @@ pub struct DualWindowManager, } -impl DualWindowManager { +impl + DualWindowManager +{ #[allow(clippy::too_many_arguments)] pub fn new( genesis: &GenesisBlock, @@ -621,67 +722,82 @@ impl bool { - let sp_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + /// Checks whether sampling mode was activated based on the selected parent (internally checking its DAA score) + pub(crate) fn sampling(&self, selected_parent: Hash) -> bool { + let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); self.sampling_activation.is_active(sp_daa_score) } } -impl WindowManager +impl WindowManager for DualWindowManager { fn block_window(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> Result, RuleError> { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.block_window(ghostdag_data, window_type), false => self.full_window_manager.block_window(ghostdag_data, window_type), } } fn calc_daa_window(&self, ghostdag_data: &GhostdagData, window: Arc) -> DaaWindow { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.calc_daa_window(ghostdag_data, window), false => self.full_window_manager.calc_daa_window(ghostdag_data, window), } } fn block_daa_window(&self, ghostdag_data: &GhostdagData) -> Result { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.block_daa_window(ghostdag_data), false => self.full_window_manager.block_daa_window(ghostdag_data), } } fn calculate_difficulty_bits(&self, ghostdag_data: &GhostdagData, daa_window: &DaaWindow) -> u32 { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.calculate_difficulty_bits(ghostdag_data, daa_window), false => self.full_window_manager.calculate_difficulty_bits(ghostdag_data, 
daa_window), } } fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc), RuleError> { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.calc_past_median_time(ghostdag_data), false => self.full_window_manager.calc_past_median_time(ghostdag_data), } } + fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result { + match self.sampling(self.ghostdag_store.get_selected_parent(hash).unwrap()) { + true => self.sampled_window_manager.calc_past_median_time_for_known_hash(hash), + false => self.full_window_manager.calc_past_median_time_for_known_hash(hash), + } + } + fn estimate_network_hashes_per_second(&self, window: Arc) -> DifficultyResult { self.sampled_window_manager.estimate_network_hashes_per_second(window) } fn window_size(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> usize { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.window_size(ghostdag_data, window_type), false => self.full_window_manager.window_size(ghostdag_data, window_type), } } fn sample_rate(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> u64 { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.sample_rate(ghostdag_data, window_type), false => self.full_window_manager.sample_rate(ghostdag_data, window_type), } } + + fn consecutive_cover_for_window(&self, ghostdag_data: Arc, window: &BlockWindowHeap) -> Vec { + match window.origin() { + WindowOrigin::Sampled => self.sampled_window_manager.consecutive_cover_for_window(ghostdag_data, window), + WindowOrigin::Full => self.full_window_manager.consecutive_cover_for_window(ghostdag_data, window), + } + } } struct BoundedSizeBlockHeap { diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 58a6e2bb33..52d9b79865 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -1367,7 +1367,7 @@ async fn difficulty_test() { fn full_window_bits(consensus: &TestConsensus, hash: Hash) -> u32 { let window_size = consensus.params().difficulty_window_size(0) * consensus.params().difficulty_sample_rate(0) as usize; let ghostdag_data = &consensus.ghostdag_store().get_data(hash).unwrap(); - let window = consensus.window_manager().block_window(ghostdag_data, WindowType::FullDifficultyWindow).unwrap(); + let window = consensus.window_manager().block_window(ghostdag_data, WindowType::VaryingWindow(window_size)).unwrap(); assert_eq!(window.blocks.len(), window_size); let daa_window = consensus.window_manager().calc_daa_window(ghostdag_data, window); consensus.window_manager().calculate_difficulty_bits(ghostdag_data, &daa_window) From 233552b4f37dba410c8008fdce5711342254b6d5 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 28 Nov 2024 23:42:06 +0200 Subject: [PATCH 31/31] Track the average transaction mass throughout the mempool's lifespan (#599) * Track the average transaction mass throughout the mempool's lifespan using a decaying formula * notebook analysis * finalize notebook * relax feerate estimator test comparisons to avoid arbitrary CI failures * review comment --- mining/src/feerate/fee_estimation.ipynb | 159 +++++++++++++++++++++++- mining/src/mempool/model/frontier.rs | 48 ++++--- 2 files changed, 188 insertions(+), 19 
deletions(-) diff --git a/mining/src/feerate/fee_estimation.ipynb b/mining/src/feerate/fee_estimation.ipynb index a8b8fbfc89..51b905fa29 100644 --- a/mining/src/feerate/fee_estimation.ipynb +++ b/mining/src/feerate/fee_estimation.ipynb @@ -14,7 +14,7 @@ }, { "cell_type": "code", - "execution_count": 97, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -464,6 +464,155 @@ "pred" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Avg transaction mass\n", + "\n", + "We suggest a decaying weight formula for calculating the average mass throughout history, as opposed to using the average mass of the currently existing transactions. The following code compares the two approaches." + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Helper function for creating a long sequence of transaction masses with periods of highly unusual mass clusters\n", + "N = sequence length\n", + "M = length of each unusual cluster\n", + "X = number of unusual clusters\n", + "\"\"\"\n", + "def generate_seq(N, M, X, mean=2036, var=100, mean_cluster=50000, var_cluster=10000):\n", + " seq = np.random.normal(loc=mean, scale=var, size=N)\n", + " clusters = np.random.normal(loc=mean_cluster, scale=var_cluster, size=X * M)\n", + " cluster_indices = np.random.choice(N - M, size=X, replace=False)\n", + " for i, idx in enumerate(cluster_indices):\n", + " seq[idx:idx+M] = clusters[i*M:(i+1)*M]\n", + " return seq" + ] + }, + { + "cell_type": "code", + "execution_count": 68, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "[base64 PNG omitted: plot of the Approach 1 running average over the simulated mass sequence]", + "text/plain": [ + "[figure repr omitted]" + ] + },
+ "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Create a long sequence of transaction masses with periods of highly unusual mass clusters\n", + "N = 500_000\n", + "seq = generate_seq(N, 50, 40)\n", + "\n", + "# Approach 1 - calculate the current average\n", + "\n", + "# Requires a removal strategy for having a meaningful \"current\"\n", + "R = 0.8\n", + "# At each time step, remove the first element with probability 1 - R\n", + "removals = np.random.choice([0, 1], size=N, p=[R, 1 - R])\n", + "# After const steps, remove with probability 1, so that we simulate a mempool with nearly const size\n", + "removals[256:] = 1\n", + "j = 0\n", + "y = []\n", + "for i in range(1, N+1):\n", + " y.append(np.sum(seq[j:i])/(i-j))\n", + " if removals[i-1] == 1:\n", + " j += 1\n", + "\n", + "x = np.arange(0, N)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 82, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "[base64 PNG omitted: second output plot]",
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "N = 5_000_000\n", + "seq = generate_seq(N, 50, 40)\n", + "# Approach 2 - calculate a decaying average\n", + "D = 0.99999\n", + "y = []\n", + "avg = 2000\n", + "for i in range(N):\n", + " avg = avg * D + seq[i] * (1 - D) \n", + " y.append(avg)\n", + "\n", + "x = np.arange(0, N)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Approach 1: \n", + "Clearly fails in this case and is highly influenced by the unusual clusters. \n", + "\n", + "#### Approach 2:\n", + "Learns the long term average and clusters only cause minor spikes." + ] + }, + { + "cell_type": "code", + "execution_count": 95, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAj4AAAGvCAYAAABb4N/XAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA7CElEQVR4nO3de1yUdd7/8fcAAiIgKoqiIp4PqVgi5pprItWS6y/XrW3XuiPb6m5vbE23Wm0r213TujusbXFrmWm56+rWpm15SvFAtZqKYpqpqeRZ8AQCKoeZ+f0BMzDODCcdxrl4PR8PHzIX18x8R796vfl8D5fJarVaBQAA0Aj4ebsBAAAADYXgAwAAGg2CDwAAaDQIPgAAoNEg+AAAgEaD4AMAABoNgg8AAGg0CD4AAKDRCPB2A643FotFJ06cUFhYmEwmk7ebAwAAasFqtaqgoEDR0dHy83Nf1yH4XOHEiRPq2LGjt5sBAADq4ejRo+rQoYPb7xN8rhAWFiap/A8uPDzcy60BAAC1ceHCBXXs2NF+HXeH4HMF2/BWeHg4wQcAAB9T0zQVJjcDAIBGg+ADAAAaDYIPAABoNAg+AACg0SD4AACARoPgAwAAGg2CDwAAaDQIPgAAoNEwbPDJy8tTfHy8BgwYoL59+2ru3LnebhIAAPAyw+7cHBYWpoyMDIWEhKioqEh9+/bV2LFj1apVK283DQAAeIlhKz7+/v4KCQmRJBUXF8tqtcpqtXq5VQAAwJuuKvi89NJLMplMeuKJJ65Rc8plZGRo9OjRio6Olslk0rJly1yel5aWptjYWAUHB2vw4MHasmWLw/fz8vIUFxenDh066KmnnlJkZOQ1bScAAPAt9R7q2rp1q95++23179+/2vO++uorJSQkqEmTJg7H9+zZo1atWikqKsrpOUVFRYqLi9NDDz2ksWPHunzdJUuWaPLkyZozZ44GDx6sWbNm6Y477tC+ffvUpk0bSVJERIR27typnJwcjR07VnfffbfL9wMAXL9Kyiw6W1SsMwUlOl14WRculamopEyXSswqKjbrYmmZikstMlusKrNYZbZYKn6veGwu/71c+e+2AQD7Uau1ytfO36uKwYOr996DgxQY4J1Bp3oFn8LCQt13332aO3eupk+f7vY8i8Wi1NRUde/eXYsXL5a/v78kad++fUpMTNTkyZP19NNPOz0vOTlZycnJ1bbh9ddf1yOPPKLx48dLkubMmaPly5frvffe05QpUxzOjYqKUlxcnL744gvdfffdLl8vLS1NaWlpMpvN1b4vAMBzci5c1tfZ55R1JE/f5xbo0OkiHc+75O1m4RqrjJkNr17BJzU1VaNGjVJSUlK1wcfPz08rVqzQj3/8Yz3wwANauHChsrOzlZiYqDFjxrgMPbVRUlKizMxMTZ061eG9kpKStGnTJklSTk6OQkJCFBYWpvz8fGVkZOg3v/lNtZ8pNTVVFy5cUPPmzevVLgBA3Z3Iu6SlO47r050ntPdUgctzAvxMahUaqMjQIEWENFFIYICaBfqracXvgQF+CvD3UxM/k/z9TQrwM8nfz6/i9/JfporXMlV8YT/i+JtMthMcjrl+bpVTUQcBft6bYlzn4LN48WJt375dW7durdX50dHRWrdunYYNG6Zx48Zp06ZNSkpK0uzZs+vcWJszZ87IbDY7DVtFRUVp7969kqTDhw/r0UcftU9qfvzxx9WvX796vycA4No6eu6iZq39Xp9kHbcPRZlMUp924RoU21K92oapW5tQxUY2U8uQQPn5kTJw9eoUfI4ePaqJEydqzZo1Cg4OrvXzYmJitHDhQg0fPlxdunTRvHnzHBK1JyQkJCgrK8uj7wEAqDuzxap3Mg5p1tr9Ki6zSJISOrfUz29qrztuaKuIkEAvtxBGVqfgk5mZqdzcXN100032Y2azWRkZGXrrrbdUXFxsn8dTVU5Ojh599FGNHj1aW7du1aRJk/Tmm2/Wu9GRkZHy9/dXTk6O0/u0bdu23q8LAPCs/EulemLxDq3fd1qSNKRLK/0+uZcGdIzwbsPQaNQp+IwcOVK7du1yODZ+/Hj16tVLv//9712GnjNnzmjkyJHq3bu3PvzwQ+3fv1+33nqrgoKC9Oqrr9ar0YGBgRo4cKDS09M1ZswYSeUTqdPT0zVhwoR6vSYAwLPyL5Zq3Lub9e2JCwoK8NOf7+qre+I7eHwEAKiqTsEnLCxMffv2dTjWrFkztWrVyum4VB5GkpOT1alTJy1ZskQBAQHq06eP1qxZo8TERLVv316TJk1yel5hYaEOHDhgf5ydna2srCy1bNlSMTExkqTJkycrJSVF8fHxSkhI0KxZs1RUVGRf5QUAuH5cLjXrgflb9O2JC2rVLFALxieoXwcWkqDhefSWFX5+fpoxY4aGDRumwMDKMdu4uDitXbtWrVu3dvm8bdu2acSIEfbHkydPliSlpKRowYIFkqR7771Xp0+f1vPPP69Tp05pwIABWrVqFfv0AMB1xmq16rllu7XzaJ4iQpro748MVq+24d5uFhopk5X7ODiwLWfPz89XeDj/MAHgan2SdVwTF2fJzyQt/PVgDe3GLvq49mp7/TbsvboAAN6Xd7FEf/p0jyTptyO7E3rgdQQfAIDHvPr5Pp0tKlGPqFD9z63dvN0cgOADAPCMo+cuasnWo5KkP/6/vl67NxNQFb0QAOARaesPqNRs1dBurTSkaytvNweQRP
ABAHjA2cJifbz9uCTpiaQeXm4NUIngAwC45j7MPKYSs0X92jfXoNiW3m4OYEfwAQBcUxaLVYu+PiJJ+q+bO3m5NYAjgg8A4JrKPHJeR85dVFhQgEbHRXu7OYADgg8A4Jpa/s1JSdJtN0SpaaDzPRwBbyL4AACuGYvFqhW7yoPPqH7tvNwawBnBBwBwzWw/cl65BcUKCw7QLd3ZpRnXH4IPAOCa2bj/tCTp1p5tFBTAMBeuPwQfAMA1k/H9GUnSMKo9uE4RfAAA10TexRLtOpYnieCD6xfBBwBwTfzn4FlZrFL3NqFq17ypt5sDuETwAQBcE1uyz0mShnaj2oPrF8EHAHBNbD9yXpJ0U6cWXm4J4B7BBwBw1S6XmrXnxAVJ0k0xEd5tDFANgg8A4KrtOp6vMotVrcOC1D6C+T24fhF8AABXbfvhimGumAiZTCYvtwZwj+ADALhqOyuWsd8Yw/weXN8IPgCAq7b3ZIEk6YbocC+3BKgewQcAcFUulZiVfbZIktSrLcEH1zeCDwDgquzPKZDVKkWGBqp1WJC3mwNUi+ADALgqe0+VL2On2gNfQPABAFyV7yrm9/RqG+bllgA1I/gAAK6KveLTjooPrn8EHwDAVTl4unxic/c2oV5uCVAzgg8AoN4Ki8t0uqBYktS5dTMvtwaoGcEHAFBvP5wpr/ZEhgYqPLiJl1sD1IzgAwCot+yK4BPbimoPfAPBBwBQb7bg0zmS4APfQPABANSbbaiL+T3wFQQfAEC9HbIFH4a64CMIPgCAevuh4h5dsQx1wUcQfAAA9VJYXKa8i6WSpJiWIV5uDVA7BB8AQL2cyLskSYoIaaJmQQFebg1QOwQfAEC9HD9fHnyimzf1ckuA2iP4AADq5XhFxSc6guAD30HwAQDUiy34dGhB8IHvIPgAAOrlhL3iE+zllgC1R/ABANSLbY5P+whWdMF3EHwAAPVCxQe+iOADAKizUrNFpy5cliS1Z3IzfAjBBwBQZ7kFxbJYpSb+JkWGBnm7OUCtEXwAAHWWW1HtaR0aJD8/k5dbA9QewQcAUGenC4olSa3Dmd8D30LwAQDUWW5F8GkTxjAXfAvBBwBQZ7bg05rgAx9D8AEA1NlpKj7wUQQfAECdnS6omNxM8IGPIfgAAOqsco4Pk5vhWwg+AIA6O80cH/gogg8AoE4sFitzfOCzCD4AgDo5f7FEZRarJLFrM3wOwQcAUCenC8urPS1CmigwgMsIfAs9FgBQJ2cLSyRR7YFvIvgAAOrkXFF58GnZLNDLLQHqjuADAKiT8xfLg0+LEIIPfA/BBwBQJ7aKTwsqPvBBBB8AQJ2ctw91NfFyS4C6I/gAAOrk3MVSSQx1wTcRfAAAdXKeyc3wYQQfAECd2Cc3E3zggwg+AIA6sVd8GOqCDyL4AADq5NxFhrrguwg+AIBau1Ri1uVSiySGuuCbCD4AgFqzVXsC/f3ULNDfy60B6o7gAwCotfP2zQubyGQyebk1QN0RfAAAtWbftZmJzfBRBB8AQK3ZlrJHhLBrM3wTwQcAUGsXLpdJkpo3JfjANxF8AAC1duFS+e0qCD7wVQQfAECtXbhcHnzCgwk+8E0EHwBArV24VD7UFU7FBz6K4AMAqLXKik+Al1sC1A/BBwBQa7Y5PlR84KsIPgCAWrOt6mKOD3wVwQcAUGsFVHzg4wg+AIBas8/xacocH/gmgg8AoFasVmvlqi6GuuCjCD4AgFopLrOoxGyRxFAXfBfBBwBQK/kV83v8/UxqFujv5dYA9UPwAQDUin0pe3CATCaTl1sD1A/BBwBQK5UTmxnmgu8i+AAAaoWJzTACgg8AoFZYyg4jIPgAAGrFNscnLIiKD3wXwQcAUCsFxeVDXaHcoBQ+jOADAKiVIlvwCSL4wHcRfAAAtVJUbJYkNQtiDx/4LoIPAKBWbBWfZlR84MMIPgCAWikqYagLvo/gAwColULbUFcgwQe+i+ADAKgVhrpgBAQfAECtsKoLRkDwAQDUSqG94sOqLvgugg8AoFao+MAICD4AgFqx7eMTQvCBDyP4AABqVFJmUYnZIkkKZVUXfBjBBwBQo4sVe/hIzPGBbyP4AABqZJvYHBTgpwB/Lh3wXfReAECNbPN7mNgMX0fwAQDUqJDNC2EQBB8AQI3YtRlGQfABANSocg8fJjbDtxF8AAA1YqgLRkHwAQDUyD7UxR4+8HEEHwBAjYpKyld1sYcPfB3BBwBQI9sGhiFUfODjCD4AgBpdKim/XUVIIBUf+DaCDwCgRpdKy4e6mjYh+MC3EXwAADW6VDHU1ZSKD3wcwQcAUCN7xYfgAx9H8AEA1OhSafkcH4a64OsIPgCAGtmHugg+8HEEHwBAjWxDXcEMdcHHEXwAADW6VLGBYQgVH/g4gg8AoEa24MPkZvg6gg8AoEbs4wOjIPgAAGrEcnYYBcEHAFAti8Wqyyxnh0EQfAAA1bpcZrZ/TcUHvo7gAwColm1isyQFBxB84NsIPgCAatn38GniJz8/k5dbA1wdwwafvLw8xcfHa8CAAerbt6/mzp3r7SYBgE+yL2Vnfg8MIMDbDfCUsLAwZWRkKCQkREVFRerbt6/Gjh2rVq1aebtpAOBTWMoOIzFsxcff318hISGSpOLiYlmtVlmtVi+3CgB8D5sXwkjqHHxmz56t/v37Kzw8XOHh4RoyZIhWrlx5TRuVkZGh0aNHKzo6WiaTScuWLXN5XlpammJjYxUcHKzBgwdry5YtDt/Py8tTXFycOnTooKeeekqRkZHXtJ0A0BhcZA8fGEidg0+HDh300ksvKTMzU9u2bVNiYqLuuusuffvtty7P/+qrr1RaWup0fM+ePcrJyXH5nKKiIsXFxSktLc1tO5YsWaLJkydr2rRp2r59u+Li4nTHHXcoNzfXfk5ERIR27typ7OxsLVq0yO37AQDcu8wcHxhInYPP6NGjdeedd6p79+7q0aOHXnzxRYWGhmrz5s1O51osFqWmpmrcuHEymyuXQ+7bt0+JiYl6//33Xb5HcnKypk+frp/97Gdu2/H666/rkUce0fjx49WnTx/NmTNHISEheu+995zOjYqKUlxcnL744ou6flwAaPQqV3URfOD7rmqOj9ls1uLFi1VUVKQhQ4Y4v7ifn1asWKEdO3bogQcekMVi0cGDB5WYmKgxY8bo6aefrtf7lpSUKDMzU0lJSQ7vlZSUpE2bNkmScnJyVFBQIEnKz89XRkaGevbs6fY109LS1KdPHw0aNKhebQIAo7pouzM7Q10wgHqt6tq1a5eGDBmiy5cvKzQ0VEuXLlWfPn1cnhsdHa1169Zp2LBhGjdunDZt2qSkpCTNnj273o0+c+aMzGazoqKiHI5HRUVp7969kqTDhw/r0UcftU9qfvzxx9WvXz+3r5mamqrU1FRduHBBzZs3r3fbAMBoLrOqCwZSr+DTs2dPZWVlKT8/Xx999JFSUlK0ceNGt+EnJiZGCxcu1PDhw9WlSxfNmzdPJpNnN8FKSEhQVlaWR98DABoDVnXBSOo11BUYGKhu3bpp4MCBmjlzpuLi4vTGG2+4PT8nJ
0ePPvqoRo8erYsXL2rSpEn1brAkRUZGyt/f32myck5Ojtq2bXtVrw0AcGRf1dXEsFu/oRG5Jvv4WCwWFRcXu/zemTNnNHLkSPXu3Vsff/yx0tPTtWTJEj355JP1fr/AwEANHDhQ6enpDm1IT093OdcIAFB/tqGuoCaG3foNjUid4/vUqVOVnJysmJgYFRQUaNGiRdqwYYNWr17tdK7FYlFycrI6deqkJUuWKCAgQH369NGaNWuUmJio9u3bu6z+FBYW6sCBA/bH2dnZysrKUsuWLRUTEyNJmjx5slJSUhQfH6+EhATNmjVLRUVFGj9+fF0/EgCgGsVlFkncoBTGUOfgk5ubqwceeEAnT55U8+bN1b9/f61evVq33Xab07l+fn6aMWOGhg0bpsDAQPvxuLg4rV27Vq1bt3b5Htu2bdOIESPsjydPnixJSklJ0YIFCyRJ9957r06fPq3nn39ep06d0oABA7Rq1SqnCc8AgKtTXFoefKj4wAhMVu7j4MC2qis/P1/h4eHebg4AeN2ERdv12TcnNW10H40f2tnbzQFcqu31m/gOAKjW5YqKDxsYwggIPgCAahWXVUxuDuCSAd9HLwYAVMs2uTmIyc0wAIIPAKBalcGHSwZ8H70YAFCtYvbxgYHQiwEA1SphqAsGQvABAFTLtnNzMBUfGAC9GABQLSY3w0gIPgCAajG5GUZCLwYAVMu+jw9DXTAAejEAwC2zxapSc/mdjRjqghEQfAAAbtmqPRJDXTAGejEAwC3bndklgg+MgV4MAHDLNrE5wM+kAH8uGfB99GIAgFvcoBRGQ08GALhlX8rehInNMAaCDwDALdscHyo+MAp6MgDArcsMdcFg6MkAALcqKz4MdcEYCD4AALdsk5u5QSmMgp4MAHCLG5TCaAg+AAC3uE8XjIaeDABw6zKrumAw9GQAgFvFpbZVXQx1wRgIPgAAtyrn+HC5gDHQkwEAbrFzM4yG4AMAcIt7dcFo6MkAALe4ZQWMhp4MAHCLOT4wGnoyAMCtUnN58Gniz+UCxkBPBgC4VVIRfAKp+MAg6MkAALdKyqj4wFjoyQAAt0qp+MBg6MkAALdsFZ9AKj4wCHoyAMCtUrNVktQkwOTllgDXBsEHAOBWZcWHnZthDAQfAIBbJfbl7FR8YAwEHwCAW0xuhtHQkwEAbjG5GUZDTwYAuEXFB0ZDTwYAuMUGhjAaejIAwK2SiuXsVHxgFPRkAIBbJWVmSVR8YBz0ZACAW7YNDIOo+MAg6MkAALdKzczxgbHQkwEALlksVpVZKm5ZwQaGMAiCDwDAJduuzRKTm2Ec9GQAgEtVgw9DXTAKejIAwKXSsioVH4IPDIKeDABwyVbxCfAzyc+POT4wBoIPAMCl0jI2L4Tx0JsBAC6VsJQdBkRvBgC4ZL8zOxUfGAi9GQDgkv3O7FR8YCD0ZgCAS7ahLio+MBJ6MwDAJdtydnZthpEQfAAALhVT8YEB0ZsBAC5VVny4VMA46M0AAJdKzbYblHKpgHHQmwEALpWYzZKkIIa6YCD0ZgCAS7adm6n4wEjozQAAl4rZxwcGRG8GALhkn9zMUBcMhN4MAHCphIoPDIjeDABwqdR+ry42MIRxEHwAAC5xry4YEb0ZAOCSbXJzAMEHBkJvBgC4VMYGhjAgejMAwKUyMzcphfEQfAAALpVayis+AX5cKmAc9GYAgEtl9jk+VHxgHAQfAIBLlXN8CD4wDoIPAMAl2waGDHXBSOjNAACXqPjAiAg+AACXyizs4wPjoTcDAFwqNdtWdVHxgXEQfAAALtkqPmxgCCOhNwMAXLJXfJjjAwMh+AAAXCpjVRcMiN4MAHCpzMKqLhgPwQcA4FLlUBeXChgHvRkA4JL9JqWs6oKBEHwAAC7Zhrqo+MBI6M0AAJdKuUkpDIjgAwBwyX7LClZ1wUDozQAAlypvWUHFB8ZB8AEAuFTKTUphQAQfAIBLbGAII6I3AwBcKrVwywoYD8EHAOCSfR8flrPDQOjNAAAnFotVFQUfBbCBIQyE4AMAcFJasaJLYgNDGAu9GQDgxLaHj8SqLhgLwQcA4KRq8GFVF4yE3gwAcFJ1qIuKD4yE4AMAcGKr+Pj7mWQyEXxgHAQfAICTUvtSdkIPjIXgAwBwUmbhBqUwJno0AMCJ/XYVVHxgMAQfAIAT2w1K2cMHRkOPBgA4KatY1dWEXZthMAQfAIATKj4wKno0AMBJKXN8YFAEHwCAE9s+PqzqgtHQowEATmw7N1PxgdEQfAAATsqY4wODokcDAJzY9vFhVReMhuADAHBSarFVfAg+MBaCDwDAib3iw1AXDIYeDQBwYp/jw1AXDIbgAwBwUrmqi8sEjIUeDQBwYt/Hhzk+MBiCDwDAiX3nZjYwhMHQowEATspY1QWDIvgAAJyYLUxuhjERfAAATti5GUZFjwYAODHbVnVR8YHBEHwAAE5sc3z8CT4wGIIPAMCJbY6Pv4ngA2Mh+AAAnNgrPqzqgsEQfAAATljVBaMi+AAAnNiHutjAEAZDjwYAOCmj4gODIvgAAJzYlrOzqgtGQ/ABADih4gOjIvgAAJyY2ccHBkXwAQA4YQNDGBXBBwDgxGxmqAvGRPABADgxW1nODmOiRwMAnLCBIYyK4AMAcMIcHxgVwQcA4MS2j08A9+qCwRB8AABOysxUfGBMBB8AgBPm+MCoCD4AACe2OT5+JoIPjIXgAwBwYq/4MMcHBkPwAQA4qbxlBZcJGAs9GgDghDk+MCqCDwDASVnFcnZWdcFoCD4AACdUfGBUBB8AgBN2boZREXwAAE4qKz5cJmAs9GgAgBP7Pj5cJWAwdGkAgBMqPjAqejQAwImZOT4wKIIPAMAJq7pgVAQfAIAT9vGBURF8AABOuFcXjIrgAwBwwj4+MCqCDwDAgcVilbU897CqC4ZDjwYAOLBVeyTJ30TFB8ZC8AEAODBXDT7M8YHBEHwAAA7M1srgw3J2GA3BBwDgwGyuUvEh+MBgCD4AAAe2PXwk5vjAeAg+AAAHtjk+fibJj4oPDIbgAwBwUMYNSmFg9GoAgANuUAojI/gAABywazOMjOADAHBAxQdGRvABADiw36CU4AMDIvgAABzYlrNT8YEREXwALysps6jnsyv19Ec7vd0UQBIVHxgbwQfXBWuVLfIbmy++P63iMov+ue2Y8i+Vers5QOXkZu7TBQMi+MDrPt15QgOnr9V/DpzxdlOuueIysw7kFlR7TtMm/vavX/j3t55uElAjM/v4wMDo1fC6GSu+07miEo1792vDVX5eWbVPSa9nKOW9Le5PqvJD9dIdx2WxGOvPAL6nzFy5czNgNAQfeF2vtmH2r6/niseH245q5GsbdPB0Ya2f8+6X2ZKkjftP2wON1WpVwWX3Q1rLso5fXUM94EBuoTbuP+2x179UYta8L7OVc+Gyx94DtUfFB0ZGr0adfXXgjD7efuyavNbu4/n64vvKIa73Nx2+bqs+T330jQ6eLtLI1zbW+jlDu7Wyfz3/Pz9Ikn7x9ib1e+Fzvb3xoCRp1trvHZ7zx0/3eL3qs/ybk4qdslzp3+VI
kpJe36iU97Yobf2BGp976HSh1u/NtT8+U1isBV9lVxv2Ptp+TH/+bI8Gz0i/bv/+GxOzlX18YFwEH9TZfe9+rcn/3KnURduv+rV+8/dM+0RKm85TV1z1617pcqn5ql+jRUgT+9fr9+U6ff9sYbHD4ycW79BXB87aH//5sz26XGrW1h/OS5Jmrtyrgsul2pJ9zuF5+ZdK9ev3t151e6+G7e/21+9vcwhhr6zeV2MwSXxto8Yv2KqZK76TJM3NOKQXPt2jH//verfPPZF3yf714q1Hr7b5uErmiuXsAUxuhgERfFAr24+c1z+3HnW4CC7/5qS9JF5fR89dcnm8uupAXU3/bI96PbdK97/7tcvv17a60ic63P71+PlbVWa22B9/vP2YBk5fq1tfWW8/tizrhNNrTFqSpZtiIuyP+73wuVo2C5Qk3RAdrh/3aC1JWr/vtA7VYUjtWthx5LxunpGuf249qvDgAPvxhZsPO5w3oyLQVFVUXObw5yFJb2ccUkmZRbuO50uSzl8sdRrGW/3tKQ16ca1O5VcOcU39eJf25zhOCH/yw52avCSrXp+rsTpbWKzX1+zX0XMXXX6/un+7tjk+VHxgRAQf1MrkJVl6+l/f6M/L9zgc7/qM++pMbYYsOrRoav/6/ptj7F/3e+HzerTSNds8my8PnFH2mSKH7y3cfFhdnlmh/9tQ8xDOlbr9YaX96/SKoZ0fzl7U14fOunuKVu4+pe1H8hyOnSsqkST97vYeevWe/vbjiXUYUrsWPsk6oVMXLuvpf32j1mFB9uPTrph3NfeLbB07X3kxzbtYosEz0vXTN790Wo7f49mV6t4mtPK1PvlWuVXm8fzp0z06XVCspTscA9Htf8mwf30q/7I+yjymj3ccV+yU5Vf3IRuRxVuP6q/p32vY/65XYXGZw/dW7T6prs+s0B+W7nJ6Xubhc3p0YaYk9vGBMRF8UCs/nC2/0M3/6gen7/33wm1Ox4qKy9R56grFTlnusnqzclf5HJJj5ysrPk8k9dA7/zXQ/vhaXeQGdmph/3rEqxtUXFY57DW7Ys7K/67ap13H8qt9nc2HyoekekZVTsZ+M718fk7VAHfvO5u1+3i+QoMqqybP/bRPrdraJixYI3q2tj8e+tK6Wj2vvkrNFhVVXBQjQwPtxw+eLnL3FEnSIx9k2p935NxFFRaXae+pAj38/laHzy2Vz9uyuXC5TAkz0u3VhsAA9/8F/Whmur2NVdVmnlFJmaVB5kldT/OR1u/LVZ/nV9krhacLKode+05b7dDWjzLLg+bfvz5in2tm8/PZm+xfm0wEHxgPwccHbD50Vre8vE57T11o0Pe1Wq06dv6irFaropsHuz1v9bc5+uSKIYwDuZXDNP1e+Nzp4vXWFRevjx4bosjQIN1+Q1uH41cTftLWH1DslOVOF+Kez65SSVl5e/q2b24/PvqtLx2GXKoqLjPbL9b/PbyL/fhra/a7HEL76Ztf2n/KfuOXA/TQ0Fil/264wzmPDe/q8Nh2XZo/PkGRoeUVl+N5l7Rk65EaP6vNJ1nHNfb/vtI3x/Jqdf69b2/SkJnp2pJ9zuVFLq5jhMPjRQ8PVmRooL47eUE/ffNLp3Cx9YfzTtUFmztuiLJ/fcvL62SxWB0Co+Q4GfxE/mVNcjG89crq6kPqxZIy9Xh2pbo8s0I/nKk+wLmz61i+Br24Vv/c5n6+0Usr99rDfUNyF7beTP9eF0vMSnxto07kXVKLkECH71edO1f1z33myr1644oJ9jZXzj8DjIDg4wNmbzioY+cv6SezvtAHm35osPf9MPOYbnl5vTpPXaFSFz89r538Y/vXExdnqcezK53Osen+h5UOwyM9qyxhv9L+6ckOj5/6sH63cnhl9T5JcrkMu8ezK1VSZlFQlc0DJenmmelOIU6SPShJ0k0xLbTrhdvtj788cEZvbzwkSQpyUcGI6xAhk8mkrq1D9bdfD7Yf79e+ubpENrM/rjrJe8szI+1f//5fu2p9cZ294aC2H8nT/3vrK728am+N528/kqcLl8v0i7c3ac6G8p/8E2Jb2r8/ZkC0Q0Dr1S5cc+4vr8plnylSl2dWqNRc2e6qFZzPHr/F4b1iI5tpSnIvSdLJ/Mvq8swKhz9XSfpR10gdmnGn/fHSHcd195z/OLV79Ftfavk3J11+puNVqoi3vrpBX35f940xF205rNMFxXr6o2+00M2/uTlVKiWxU5Y3SPXnnjn/UeepK/TuF4ecvpdbpcLzo5fW6fM9p5zOiZ2yXGaLVVdm3L+s3W8PP8O6R9qPV+0LgFEQfBpQSZlF357Ir/N/kFUDw/OffNtgP2F+d7KywmQrm//5rhvsxzpHhirz2ST745Iyi2KnLHea5Gpzy8vrtXhLefXiyrkD3asMHwUG+OnrKhf+DzOPKXbKcqeLZF29NLafw+Mez67UpzvLJyD/qGtlpWHi4qxq/4zbRQQrLLiJ1j95q9P37hvcSb+7rYfDMUuVv+9bukdq5cRhmpLcSyN7t9G6J2/Vnf3aqne7cCX1rqyI+PmZtPfPP3F4ndgpy3XmipVjVwoJrAxyszccrPbv40oFFZWaTq1C9LdfD9aInq01ql87TUnupbkPxGteSrxaNgtUfGxL/e/PK+ci/Xx2eTBpH9FUb/3qRvvxiJAm2vXC7WofUV5diGkZoseGd3Wo/HxdUVH4y71x2vn87Uod0U1+fiaHv/+cC+Wf2c8kvf6LOPvx1EXbXQaOKy/qDy3YqsVbjtTp393l0so/s+cq/s1VHSKVpF/Ed3B43HnqCo9XSGwrAqcv/86+as6ma+tQh8ffnij/93vf4BjH855ZoYyKHwZuqDJh/y9r9zv0+z/ddYOW/PfN167xwHWC4NOAbvrzGo3665fqPHWFVu5y/dOqK9ERTZ2OxU5Z7vHqT9WLqE1cxwhlz7xT2TPvlL+fSa1Cg7T8t44/2Xf7w0r9q2Kfn/YRTRVWZahpysfl1YtNFROAn/5JT2XPvFPNmzZxeI2o8GDtm+544e/x7Er7T6y1UXX5uSSFN22iH14apd8mdnM697Y+UZo+pq/Dsdgpy6sdZuoc2UxZz9/mcOw/B8/o8ZHdteiRysrOlX9/vduF67HhXRVcUW36v/sGauXEYU4raIKb+Gvn87c7HIufvlaLvj7idnl+syuG9aTyvw9382KCmzj/F/Bh5jHd0j1S88cnqE14+RDnbX2iNLJKMPvFoI4OQ35S+bDc7Te0Vdq4mzRzbD91aBGisOAm2vDUrVr6Pz/SLweVX4Df/q94jezVxuG554pK1bzK31dUeLB2//EOh3MsVmnsTR2chgw7T12he9/eJHdKzBZN+XiXOk9doUVf127Y0FXf7/nsKv16wVZ7gPJzMTT4i7c3KXbKch0+W78htrp4O+OQYqcsdwpAgzs7VmksVqt+eGmUffWgVDmHa3iP1pr/4CCH8237aoUHN2GODwyJ4NOAqs59+M3fy39ajZ2yXN/nVH8vJxvbMIGNrfoTO2W59p2q3WvUR9XSd5fWoTKZTA7/Id4Q3VzZM+90eM4HFRNaL5e
ateuPdziFI9sy9rOFJW7/cw0K8Ff2zDud5pl0faZ8XsVb61zPS7jSm7+6Uf91cyfd1qf8wj359p768xUhJyjAX/ff3EnbqlSwpMphpoHT17p87YiQQIehmZkVVaUfdY1U9sw7deDFZHvAqY/mIU2UPfNOhzkZzyzdpV7PrdLgGWv1/Ce7XT6vamVEKh/2s/WVI2edlzd/8fQI+9epI7o6fd+Vqcm97cNeVY3q306/SqisMjTx99ONMS0cgt28Bwc5VI16uxj6DA0KcOpXUnll48rh0K+zz9k/33cny/8tRIQ00cEZdzpU4J5Zust+Xm3m/0xK6qGf31RZ2Unfm2uf12PbwfvJ23vol4M6Ojxv+CsbFDtludbuyXE756k+bEG1aoixBSDbkO498R31nymJ9u/f0q18svz2525Tx5aOIdzPZNKIXm20c5pjwJacK2eAUZis19OyhOvAhQsX1Lx5c+Xn5ys8PLzmJ9TB/e9+rS9ruBHn1OReio5oqpu7tLIvKf6veV/ri+/P6C/3xulnN3bQ0h3HNGlJ9fNeOkc205u/ulFdWjdTSKBzFaA2Xlm9V2nrD2r80FhNG31DzU9Q+Wqtx/+xwz5fpWvrZkr/3a2SyidlXrk5YadWIdr41IgrX8bJhn25enB+zZv6JcS21G9Hdld8bAsNmZmu8xdLtXbyj9WtjfOF1WKx6k+f7dHxvEt6+ef9HS4mjy3M1KpvnedISNK+6T9RUED9w0x95V8q1UMLtupE3iWddDMJ28bWV/adKtAdszKqPVeSvvz9CHVoEVKvdhWXmTXtk2/18LDOLv+cq2O1WnW51KKmLiosVV0uNSvAz6QAf8ef1dLWH7DP5XLlh5dGSZL+seWIpn7svHS7qnbNg/VuSrx6tw3Xc5/s1t+/PqJJST00Mam7Dp4udLtj95O399CExO4qLjOr57OrnL4f4Gdy2qQz/XfD1SWyWZ0rKr2eW6nLpRZ98fQInci7pHvf2ex0zrOjeuvhYeXVOKvV6vQepWaL/rb5sNbtzdUzd/ZW73aV/88dPXdR/7fhgP6VeVzrn7rVPkwJ+ILaXr8JPldoiODzxi8HaH9OgdLWH6z5SVXYLmY2Czcf1nPLXP/Efy3VJfjYZB4+pz999p3+59auuuOKlVpWq1Xj5n6tTYfOasdzt6lFs0A3r+LsbGGxpi//zmnfl+p8PunH6hFVtwuyzcJNP+i5Txz3sTk04075eXF/kzKzRct3ndTExVluz7myr6z+9pT+u2JvFlfWP3mrOleZaO1rVuw6qf/5u/NO4rbgY/P+f35w2peoOo8ndtPvbu9pf3zs/EXd8vJ6h3Pe+OUA3TWgvf2x2WLV/e9+rV3H89WyWaCOuNlA8GpUDaozV35nn1wvSf+eMFT9O0Rc8/cErncEn3pqqOBT9T/KDzb9oOc/qfk/448eG6J4N6ssavrJ92rUJ/g0hN3H8/XTN7+s8bytf0hy2JCvPiwWqzZnn1Xr0CCHidjedq6oRDf9eY3DsQ4tmuofj9ysji1dV3BchYTvX0xWE39jjHxnnynSiFc3KG3cTRrVv53b8z7KPKYna1gx+NQdPZU6wnlOmCTlXLisE3mX1L9DRLU7HB85e1H3zdvsdpfy+sh6/jZFhDj/wOCqwgM0FgSfevJk8On/wmpduFzmFHxcyT5TpM92ntBra/ZLkj58bIgG1XFpaVFxmXYcydNf07/XmcJiHarnniYbn7pVnVr5TjWgpMyi3SfyNe+LbP3m1q4Oe/UA1blUYtY/tx3V0h3HFd+phabe2dsjt23Iu1iiv20+rI93HNehKzaLjAhpohs7Rmj9PudtGKTyVY9XznECQPCpN08Fn51H83RX2leSpLkPxNsn2gIAgKtX2+u3MWrbPqDqrRkSOrMpGAAA3mDY4JOXl6f4+HgNGDBAffv21dy5c73aHtu+IP3aN3faswYAADSM+q1z9gFhYWHKyMhQSEiIioqK1LdvX40dO1atWrWq+ckeUFKxe24TfyYeAgDgLYat+Pj7+yskpHxVS3FxsaxWq1fvpFxqDz6G/SMHAOC6V+er8MyZMzVo0CCFhYWpTZs2GjNmjPbtu7bLqDMyMjR69GhFR0fLZDJp2bJlLs9LS0tTbGysgoODNXjwYG3ZssXh+3l5eYqLi1OHDh301FNPKTIy0uXrNISCy9du91YAAFA/dQ4+GzduVGpqqjZv3qw1a9aotLRUt99+u4qKXC+V/uqrr1RaWup0fM+ePcrJyXH5nKKiIsXFxSktLc1tO5YsWaLJkydr2rRp2r59u+Li4nTHHXcoNzfXfk5ERIR27typ7OxsLVq0yO37NYRnKzYa7NSqfrvjAgCAq1fn4LNq1So9+OCDuuGGGxQXF6cFCxboyJEjysx03hXWYrEoNTVV48aNk9lceVPFffv2KTExUe+//77L90hOTtb06dP1s5/9zG07Xn/9dT3yyCMaP368+vTpozlz5igkJETvvfee07lRUVGKi4vTF1984fb10tLS1KdPHw0aNMjtOfV18HSh/caaY2rYvwcAAHjOVU84yc/PlyS1bOm8RNvPz08rVqzQjh079MADD8hisejgwYNKTEzUmDFj9PTTT9frPUtKSpSZmamkpMobSvr5+SkpKUmbNpXfpTknJ0cFBQX2NmZkZKhnz54uX0+SUlNTtWfPHm3dWvP9oOqqa+tQfZI6VM/c2Us/6ua94TYAABq7q1rVZbFY9MQTT2jo0KHq27evy3Oio6O1bt06DRs2TOPGjdOmTZuUlJSk2bNn1/t9z5w5I7PZrKgox00Ao6KitHfvXknS4cOH9eijj9onNT/++OPq169fvd/zasV1jHC6yzgAAGhYVxV8UlNTtXv3bn35ZfX3S4qJidHChQs1fPhwdenSRfPmzfP4/WQSEhKUlZXl0fcAAAC+pd5DXRMmTNBnn32m9evXq0OHDtWem5OTo0cffVSjR4/WxYsXNWnSpPq+rSQpMjJS/v7+TpOVc3Jy1LZtWzfPAgAAjV2dg4/VatWECRO0dOlSrVu3Tp07d672/DNnzmjkyJHq3bu3Pv74Y6Wnp2vJkiV68skn693owMBADRw4UOnp6fZjFotF6enpGjJkSL1fFwAAGFudh7pSU1O1aNEiffLJJwoLC9OpU6ckSc2bN1fTpk0dzrVYLEpOTlanTp20ZMkSBQQEqE+fPlqzZo0SExPVvn17l9WfwsJCHThwwP44OztbWVlZatmypWJiYiRJkydPVkpKiuLj45WQkKBZs2apqKhI48ePr+tHAgAAjUSd787ubm7O/Pnz9eCDDzodX7NmjYYNG6bg4GCH4zt27FDr1q1dDpNt2LBBI0aMcDqekpKiBQsW2B+/9dZbeuWVV3Tq1CkNGDBAf/3rXzV48OC6fBwnnro7OwAA8JzaXr/rHHyMjuADAIDvqe31mxtHAQCARoPgAwAAGg2CDwAAaDQIPgAAoNEg+AAAgEaD4AMAABoNgg8AAGg0ruompUZk29bowoULXm4JAACoLdt1u6btCQk+VygoKJAkdezY0cstAQAAdVVQUK
DmzZu7/T47N1/BYrHoxIkTCgsLc3t7jvq4cOGCOnbsqKNHjzaKHaH5vMbW2D6v1Pg+M5/X2Iz4ea1WqwoKChQdHS0/P/czeaj4XMHPz8/l/cOulfDwcMN0strg8xpbY/u8UuP7zHxeYzPa562u0mPD5GYAANBoEHwAAECjQfBpIEFBQZo2bZqCgoK83ZQGwec1tsb2eaXG95n5vMbW2D5vVUxuBgAAjQYVHwAA0GgQfAAAQKNB8AEAAI0GwQcAADQaBJ8GkpaWptjYWAUHB2vw4MHasmWLt5vkERkZGRo9erSio6NlMpm0bNkybzfJo2bOnKlBgwYpLCxMbdq00ZgxY7Rv3z5vN8tjZs+erf79+9s3PRsyZIhWrlzp7WY1mJdeekkmk0lPPPGEt5viES+88IJMJpPDr169enm7WR51/Phx3X///WrVqpWaNm2qfv36adu2bd5ulsfExsY6/R2bTCalpqZ6u2kNhuDTAJYsWaLJkydr2rRp2r59u+Li4nTHHXcoNzfX20275oqKihQXF6e0tDRvN6VBbNy4Uampqdq8ebPWrFmj0tJS3X777SoqKvJ20zyiQ4cOeumll5SZmalt27YpMTFRd911l7799ltvN83jtm7dqrffflv9+/f3dlM86oYbbtDJkyftv7788ktvN8ljzp8/r6FDh6pJkyZauXKl9uzZo9dee00tWrTwdtM8ZuvWrQ5/v2vWrJEk3XPPPV5uWQOywuMSEhKsqamp9sdms9kaHR1tnTlzphdb5XmSrEuXLvV2MxpUbm6uVZJ148aN3m5Kg2nRooX13Xff9XYzPKqgoMDavXt365o1a6zDhw+3Tpw40dtN8ohp06ZZ4+LivN2MBvP73//eesstt3i7GV41ceJEa9euXa0Wi8XbTWkwVHw8rKSkRJmZmUpKSrIf8/PzU1JSkjZt2uTFlsET8vPzJUktW7b0cks8z2w2a/HixSoqKtKQIUO83RyPSk1N1ahRoxz+HRvV999/r+joaHXp0kX33Xefjhw54u0mecy///1vxcfH65577lGbNm104403au7cud5uVoMpKSnR3/72Nz300EPX9Kbc1zuCj4edOXNGZrNZUVFRDsejoqJ06tQpL7UKnmCxWPTEE09o6NCh6tu3r7eb4zG7du1SaGiogoKC9Nhjj2np0qXq06ePt5vlMYsXL9b27ds1c+ZMbzfF4wYPHqwFCxZo1apVmj17trKzszVs2DAVFBR4u2kecejQIc2ePVvdu3fX6tWr9Zvf/Ea//e1v9f7773u7aQ1i2bJlysvL04MPPujtpjQo7s4OXCOpqanavXu3oedESFLPnj2VlZWl/Px8ffTRR0pJSdHGjRsNGX6OHj2qiRMnas2aNQoODvZ2czwuOTnZ/nX//v01ePBgderUSf/85z/161//2ost8wyLxaL4+HjNmDFDknTjjTdq9+7dmjNnjlJSUrzcOs+bN2+ekpOTFR0d7e2mNCgqPh4WGRkpf39/5eTkOBzPyclR27ZtvdQqXGsTJkzQZ599pvXr16tDhw7ebo5HBQYGqlu3bho4cKBmzpypuLg4vfHGG95ulkdkZmYqNzdXN910kwICAhQQEKCNGzfqr3/9qwICAmQ2m73dRI+KiIhQjx49dODAAW83xSPatWvnFNh79+5t6OE9m8OHD2vt2rV6+OGHvd2UBkfw8bDAwEANHDhQ6enp9mMWi0Xp6emGnxfRGFitVk2YMEFLly7VunXr1LlzZ283qcFZLBYVFxd7uxkeMXLkSO3atUtZWVn2X/Hx8brvvvuUlZUlf39/bzfRowoLC3Xw4EG1a9fO203xiKFDhzptP7F//3516tTJSy1qOPPnz1ebNm00atQobzelwTHU1QAmT56slJQUxcfHKyEhQbNmzVJRUZHGjx/v7aZdc4WFhQ4/HWZnZysrK0stW7ZUTEyMF1vmGampqVq0aJE++eQThYWF2edtNW/eXE2bNvVy6669qVOnKjk5WTExMSooKNCiRYu0YcMGrV692ttN84iwsDCn+VrNmjVTq1atDDmP68knn9To0aPVqVMnnThxQtOmTZO/v79+9atfebtpHjFp0iT96Ec/0owZM/SLX/xCW7Zs0TvvvKN33nnH203zKIvFovnz5yslJUUBAY0wBnh7WVlj8eabb1pjYmKsgYGB1oSEBOvmzZu93SSPWL9+vVWS06+UlBRvN80jXH1WSdb58+d7u2ke8dBDD1k7depkDQwMtLZu3do6cuRI6+eff+7tZjUoIy9nv/fee63t2rWzBgYGWtu3b2+99957rQcOHPB2szzq008/tfbt29caFBRk7dWrl/Wdd97xdpM8bvXq1VZJ1n379nm7KV5hslqtVu9ELgAAgIbFHB8AANBoEHwAAECjQfABAACNBsEHAAA0GgQfAADQaBB8AABAo0HwAQAAjQbBBwAAeFxGRoZGjx6t6OhomUwmLVu2rM6vYbVa9eqrr6pHjx4KCgpS+/bt9eKLL9bpNRrhXtUAAKChFRUVKS4uTg899JDGjh1br9eYOHGiPv/8c7366qvq16+fzp07p3PnztXpNdi5GQAANCiTyaSlS5dqzJgx9mPFxcX6wx/+oH/84x/Ky8tT37599fLLL+vWW2+VJH333Xfq37+/du/erZ49e9b7vRnqAgAAXjdhwgRt2rRJixcv1jfffKN77rlHP/nJT/T9999Lkj799FN16dJFn332mTp37qzY2Fg9/PDDda74EHwAAIBXHTlyRPPnz9eHH36oYcOGqWvXrnryySd1yy23aP78+ZKkQ4cO6fDhw/rwww/1wQcfaMGCBcrMzNTdd99dp/dijg8AAPCqXbt2yWw2q0ePHg7Hi4uL1apVK0mSxWJRcXGxPvjgA/t58+bN08CBA7Vv375aD38RfAAAgFcVFhbK399fmZmZ8vf3d/heaGioJKldu3YKCAhwCEe9e/eWVF4xIvgAAACfcOONN8psNis3N1fDhg1zec7QoUNVVlamgwcPqmvXrpKk/fv3S5I6depU6/diVRcAAPC4wsJCHThwQFJ50Hn99dc1YsQItWzZUjExMbr//vv11Vdf6bXXXtONN96o06dPKz09Xf3799eoUaNksVg0aNAghYaGatasWbJYLEpNTVV4eLg+//zzWreD4AMAADxuw4YNGjFihNPxlJQULViwQKWlpZo+fbo++OADHT9+XJGRkbr55pv1xz/+Uf369ZMknThxQo8//rg+//xzNWvWTMnJyXrttdfUsmXLWreD4AMAABoNlrMDAIBGg+ADAAAaDYIPAABoNAg+AACg0SD4AACARoPgAwAAGg2CDwAAaDQIPgAAoNEg+AAAgEaD4AMAABoNgg8AAGg0CD4AAKDR+P8cUnyKMvhSngAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Increase the mean in the long term and see how long it takes the decaying avg to fully adjust\n", + "seq2 = np.concatenate((seq, np.repeat(4000, N // 2)))\n", + "N2 = len(seq2)\n", + "y = []\n", + "avg = 2000\n", + "for i in range(N2):\n", + " avg = avg * D + seq2[i] * (1 - D) \n", + " y.append(avg)\n", + "\n", + "x = np.arange(0, N2)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.yscale('log')\n", + "plt.show()" + ] + }, { "cell_type": "code", "execution_count": null, @@ -474,9 +623,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python [conda env:gr]", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "conda-env-gr-py" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -488,9 +637,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.5" + "version": "3.10.12" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/mining/src/mempool/model/frontier.rs b/mining/src/mempool/model/frontier.rs index 8d21953271..70ac215bad 100644 --- a/mining/src/mempool/model/frontier.rs +++ b/mining/src/mempool/model/frontier.rs @@ -25,20 +25,30 @@ const COLLISION_FACTOR: u64 = 4; /// hard limit in order to allow the SequenceSelector to compensate for consensus rejections. const MASS_LIMIT_FACTOR: f64 = 1.2; -/// A rough estimation for the average transaction mass. The usage is a non-important edge case -/// hence we just throw this here (as oppose to performing an accurate estimation) -const TYPICAL_TX_MASS: f64 = 2000.0; +/// Initial estimation of the average transaction mass. +const INITIAL_AVG_MASS: f64 = 2036.0; + +/// Decay factor of average mass weighting. +const AVG_MASS_DECAY_FACTOR: f64 = 0.99999; /// Management of the transaction pool frontier, that is, the set of transactions in /// the transaction pool which have no mempool ancestors and are essentially ready /// to enter the next block template. -#[derive(Default)] pub struct Frontier { /// Frontier transactions sorted by feerate order and searchable for weight sampling search_tree: SearchTree, /// Total masses: Σ_{tx in frontier} tx.mass total_mass: u64, + + /// Tracks the average transaction mass throughout the mempool's lifespan using a decayed weighting mechanism + average_transaction_mass: f64, +} + +impl Default for Frontier { + fn default() -> Self { + Self { search_tree: Default::default(), total_mass: Default::default(), average_transaction_mass: INITIAL_AVG_MASS } + } } impl Frontier { @@ -62,6 +72,11 @@ impl Frontier { let mass = key.mass; if self.search_tree.insert(key) { self.total_mass += mass; + // A decaying average formula. Denote ɛ = 1 - AVG_MASS_DECAY_FACTOR. A transaction inserted N slots ago has + // ɛ * (1 - ɛ)^N weight within the updated average. This gives some weight to the full mempool history while + // giving higher importance to more recent samples. 
+            self.average_transaction_mass =
+                self.average_transaction_mass * AVG_MASS_DECAY_FACTOR + mass as f64 * (1.0 - AVG_MASS_DECAY_FACTOR);
             true
         } else {
             false
@@ -210,10 +225,7 @@ impl Frontier {
 
     /// Builds a feerate estimator based on internal state of the ready transactions frontier
     pub fn build_feerate_estimator(&self, args: FeerateEstimatorArgs) -> FeerateEstimator {
-        let average_transaction_mass = match self.len() {
-            0 => TYPICAL_TX_MASS,
-            n => self.total_mass() as f64 / n as f64,
-        };
+        let average_transaction_mass = self.average_transaction_mass;
         let bps = args.network_blocks_per_second as f64;
         let mut mass_per_block = args.maximum_mass_per_block as f64;
         let mut inclusion_interval = average_transaction_mass / (mass_per_block * bps);
@@ -368,8 +380,12 @@ mod tests {
         assert_eq!(frontier.total_mass(), frontier.search_tree.ascending_iter().map(|k| k.mass).sum::<u64>());
     }
 
+    /// Epsilon used for various test comparisons
+    const EPS: f64 = 0.000001;
+
     #[test]
     fn test_feerate_estimator() {
+        const MIN_FEERATE: f64 = 1.0;
         let mut rng = thread_rng();
         let cap = 2000;
         let mut map = HashMap::with_capacity(cap);
@@ -394,13 +410,13 @@
         let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 };
         // We are testing that the build function actually returns and is not looping indefinitely
         let estimator = frontier.build_feerate_estimator(args);
-        let estimations = estimator.calc_estimations(1.0);
+        let estimations = estimator.calc_estimations(MIN_FEERATE);
 
         let buckets = estimations.ordered_buckets();
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             assert!(
-                b.feerate.is_normal() && b.feerate >= 1.0,
+                b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS,
                 "bucket feerate must be a finite number greater or equal to the minimum standard feerate"
             );
             assert!(
@@ -441,7 +457,7 @@
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             assert!(
-                b.feerate.is_normal() && b.feerate >= MIN_FEERATE,
+                b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS,
                 "bucket feerate must be a finite number greater or equal to the minimum standard feerate"
             );
             assert!(
@@ -492,7 +508,7 @@
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             assert!(
-                b.feerate.is_normal() && b.feerate >= MIN_FEERATE,
+                b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS,
                 "bucket feerate must be a finite number greater or equal to the minimum standard feerate"
             );
             assert!(
@@ -506,6 +522,7 @@
 
     #[test]
     fn test_feerate_estimator_with_less_than_block_capacity() {
+        const MIN_FEERATE: f64 = 1.0;
         let mut map = HashMap::new();
         for i in 0..304 {
             let mass: u64 = 1650;
@@ -524,13 +541,16 @@
         let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 };
         // We are testing that the build function actually returns and is not looping indefinitely
         let estimator = frontier.build_feerate_estimator(args);
-        let estimations = estimator.calc_estimations(1.0);
+        let estimations = estimator.calc_estimations(MIN_FEERATE);
 
         let buckets = estimations.ordered_buckets();
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             // Expect min feerate bcs blocks are not full
-            assert!(b.feerate == 1.0, "bucket feerate is expected to be equal to the minimum standard feerate");
+            assert!(
+                (b.feerate - MIN_FEERATE).abs() <= EPS,
minimum standard feerate" + ); assert!( b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0 && b.estimated_seconds <= 1.0, "bucket estimated seconds must be a finite number greater than zero & less than 1.0"