diff --git a/Cargo.lock b/Cargo.lock index 13e6b716e662..472f0c2f5679 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11478,6 +11478,7 @@ name = "pallet-sassafras" version = "0.3.5-dev" dependencies = [ "array-bytes", + "env_logger 0.11.3", "frame-benchmarking", "frame-support", "frame-system", @@ -20061,6 +20062,7 @@ dependencies = [ "sp-application-crypto", "sp-consensus-slots", "sp-core", + "sp-inherents", "sp-runtime", ] diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 0eefca57849a..7861ad42fdd9 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -23,13 +23,15 @@ frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } + sp-consensus-sassafras = { features = ["serde"], workspace = true } +sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +env_logger = { workspace = true } sp-crypto-hashing = { workspace = true, default-features = true } [features] @@ -42,6 +44,7 @@ std = [ "log/std", "scale-info/std", "sp-consensus-sassafras/std", + "sp-core/std", "sp-io/std", "sp-runtime/std", ] diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs index 2b2467c6f84d..e247e9612971 100644 --- a/substrate/frame/sassafras/src/benchmarking.rs +++ b/substrate/frame/sassafras/src/benchmarking.rs @@ -18,7 +18,7 @@ //! Benchmarks for the Sassafras pallet. use crate::*; -use sp_consensus_sassafras::{vrf::VrfSignature, EphemeralPublic, EpochConfiguration}; +use sp_consensus_sassafras::vrf::VrfSignature; use frame_benchmarking::v2::*; use frame_support::traits::Hooks; @@ -26,12 +26,13 @@ use frame_system::RawOrigin; const LOG_TARGET: &str = "sassafras::benchmark"; -const TICKETS_DATA: &[u8] = include_bytes!("data/25_tickets_100_auths.bin"); +// Pre-constructed tickets generated via the `generate_test_teckets` function +const TICKETS_DATA: &[u8] = include_bytes!("data/tickets.bin"); -fn make_dummy_vrf_signature() -> VrfSignature { +fn dummy_vrf_signature() -> VrfSignature { // This leverages our knowledge about serialized vrf signature structure. // Mostly to avoid to import all the bandersnatch primitive just for this test. - let buf = [ + const RAW_VRF_SIGNATURE: [u8; 99] = [ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -40,16 +41,13 @@ fn make_dummy_vrf_signature() -> VrfSignature { 0x18, 0xca, 0x07, 0x13, 0xc7, 0x4b, 0xa3, 0x9a, 0x97, 0xd3, 0x76, 0x8f, 0x0c, 0xbf, 0x2e, 0xd4, 0xf9, 0x3a, 0xae, 0xc1, 0x96, 0x2a, 0x64, 0x80, ]; - VrfSignature::decode(&mut &buf[..]).unwrap() + VrfSignature::decode(&mut &RAW_VRF_SIGNATURE[..]).unwrap() } #[benchmarks] mod benchmarks { use super::*; - // For first block (#1) we do some extra operation. - // But is a one shot operation, so we don't account for it here. 
- // We use 0, as it will be the path used by all the blocks with n != 1 #[benchmark] fn on_initialize() { let block_num = BlockNumberFor::::from(0u32); @@ -57,14 +55,10 @@ mod benchmarks { let slot_claim = SlotClaim { authority_idx: 0, slot: Default::default(), - vrf_signature: make_dummy_vrf_signature(), - ticket_claim: None, + vrf_signature: dummy_vrf_signature(), }; frame_system::Pallet::::deposit_log((&slot_claim).into()); - // We currently don't account for the potential weight added by the `on_finalize` - // incremental sorting of the tickets. - #[block] { // According to `Hooks` trait docs, `on_finalize` `Weight` should be bundled @@ -77,78 +71,44 @@ mod benchmarks { // Weight for the default internal epoch change trigger. // // Parameters: - // - `x`: number of authorities (1:100). - // - `y`: epoch length in slots (1000:5000) + // - `x`: number of authorities [1:100]. + // - `y`: number of tickets [100:1000]; // // This accounts for the worst case which includes: - // - load the full ring context. - // - recompute the ring verifier. - // - sorting the epoch tickets in one shot - // (here we account for the very unlucky scenario where we haven't done any sort work yet) - // - pending epoch change config. - // - // For this bench we assume a redundancy factor of 2 (suggested value to be used in prod). + // - recomputing the ring verifier key from a new authorites set. + // - picking all the tickets from the accumulator in one shot. #[benchmark] - fn enact_epoch_change(x: Linear<1, 100>, y: Linear<1000, 5000>) { + fn enact_epoch_change(x: Linear<1, 100>, y: Linear<100, 1000>) { let authorities_count = x as usize; - let epoch_length = y as u32; - let redundancy_factor = 2; + let accumulated_tickets = y as u32; - let unsorted_tickets_count = epoch_length * redundancy_factor; + let config = Pallet::::protocol_config(); - let mut meta = TicketsMetadata { unsorted_tickets_count, tickets_count: [0, 0] }; - let config = EpochConfiguration { redundancy_factor, attempts_number: 32 }; + // Makes the epoch change legit + let post_init_cache = EphemeralData { + prev_slot: Slot::from(config.epoch_duration as u64 - 1), + block_randomness: Randomness::default(), + }; + TemporaryData::::put(post_init_cache); + CurrentSlot::::set(Slot::from(config.epoch_duration as u64)); - // Triggers ring verifier computation for `x` authorities - let mut raw_data = TICKETS_DATA; - let (authorities, _): (Vec, Vec) = - Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); - let next_authorities: Vec<_> = authorities[..authorities_count].to_vec(); + // Force ring verifier key re-computation + let next_authorities: Vec<_> = + Authorities::::get().into_iter().cycle().take(authorities_count).collect(); let next_authorities = WeakBoundedVec::force_from(next_authorities, None); NextAuthorities::::set(next_authorities); - // Triggers JIT sorting tickets - (0..meta.unsorted_tickets_count) - .collect::>() - .chunks(SEGMENT_MAX_SIZE as usize) - .enumerate() - .for_each(|(segment_id, chunk)| { - let segment = chunk - .iter() - .map(|i| { - let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); - TicketId::from_le_bytes(id_bytes) - }) - .collect::>(); - UnsortedSegments::::insert( - segment_id as u32, - BoundedVec::truncate_from(segment), - ); - }); - - // Triggers some code related to config change (dummy values) - NextEpochConfig::::set(Some(config)); - PendingEpochConfigChange::::set(Some(config)); - - // Triggers the cleanup of the "just elapsed" epoch tickets (i.e. 
the current one) - let epoch_tag = EpochIndex::::get() & 1; - meta.tickets_count[epoch_tag as usize] = epoch_length; - (0..epoch_length).for_each(|i| { - let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); - let id = TicketId::from_le_bytes(id_bytes); - TicketsIds::::insert((epoch_tag as u8, i), id); - let body = TicketBody { - attempt_idx: i, - erased_public: EphemeralPublic::from([i as u8; 32]), - revealed_public: EphemeralPublic::from([i as u8; 32]), - }; - TicketsData::::set(id, Some(body)); + // Add tickets to the accumulator + (0..accumulated_tickets).for_each(|i| { + let mut id = TicketId([0xff; 32]); + id.0[..4].copy_from_slice(&i.to_be_bytes()[..]); + let body = TicketBody { id, attempt: 0, extra: Default::default() }; + TicketsAccumulator::::insert(TicketKey::from(id), &body); }); - TicketsMeta::::set(meta); - #[block] { + // Also account for the call typically done in case of epoch change Pallet::::should_end_epoch(BlockNumberFor::::from(3u32)); let next_authorities = Pallet::::next_authorities(); // Using a different set of authorities triggers the recomputation of ring verifier. @@ -157,55 +117,37 @@ mod benchmarks { } #[benchmark] - fn submit_tickets(x: Linear<1, 25>) { + fn submit_tickets(x: Linear<1, 16>) { let tickets_count = x as usize; let mut raw_data = TICKETS_DATA; - let (authorities, tickets): (Vec, Vec) = - Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); - - log::debug!(target: LOG_TARGET, "PreBuiltTickets: {} tickets, {} authorities", tickets.len(), authorities.len()); - - // Set `NextRandomness` to the same value used for pre-built tickets - // (see `make_tickets_data` test). - NextRandomness::::set([0; 32]); - + let (randomness, authorities, tickets): ( + Randomness, + Vec, + Vec, + ) = Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + assert!(tickets.len() >= tickets_count); + + // Use the same values used for the pre-built tickets Pallet::::update_ring_verifier(&authorities); - - // Set next epoch config to accept all the tickets - let next_config = EpochConfiguration { attempts_number: 1, redundancy_factor: u32::MAX }; - NextEpochConfig::::set(Some(next_config)); - - // Use the authorities in the pre-build tickets - let authorities = WeakBoundedVec::force_from(authorities, None); - NextAuthorities::::set(authorities); + NextAuthorities::::set(WeakBoundedVec::force_from(authorities, None)); + let mut randomness_buf = RandomnessBuf::::get(); + randomness_buf[2] = randomness; + RandomnessBuf::::set(randomness_buf); let tickets = tickets[..tickets_count].to_vec(); let tickets = BoundedVec::truncate_from(tickets); - log::debug!(target: LOG_TARGET, "Submitting {} tickets", tickets_count); - #[extrinsic_call] submit_tickets(RawOrigin::None, tickets); } - #[benchmark] - fn plan_config_change() { - let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 10 }; - - #[extrinsic_call] - plan_config_change(RawOrigin::Root, config); - } - // Construction of ring verifier #[benchmark] fn update_ring_verifier(x: Linear<1, 100>) { let authorities_count = x as usize; - - let mut raw_data = TICKETS_DATA; - let (authorities, _): (Vec, Vec) = - Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); - let authorities: Vec<_> = authorities[..authorities_count].to_vec(); + let authorities: Vec<_> = + Authorities::::get().into_iter().cycle().take(authorities_count).collect(); #[block] { @@ -221,52 +163,7 @@ mod benchmarks { fn load_ring_context() { #[block] { - let _ring_ctx = 
RingContext::::get().unwrap(); - } - } - - // Tickets segments sorting function benchmark. - #[benchmark] - fn sort_segments(x: Linear<1, 100>) { - let segments_count = x as u32; - let tickets_count = segments_count * SEGMENT_MAX_SIZE; - - // Construct a bunch of dummy tickets - let tickets: Vec<_> = (0..tickets_count) - .map(|i| { - let body = TicketBody { - attempt_idx: i, - erased_public: EphemeralPublic::from([i as u8; 32]), - revealed_public: EphemeralPublic::from([i as u8; 32]), - }; - let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); - let id = TicketId::from_le_bytes(id_bytes); - (id, body) - }) - .collect(); - - for (chunk_id, chunk) in tickets.chunks(SEGMENT_MAX_SIZE as usize).enumerate() { - let segment: Vec = chunk - .iter() - .map(|(id, body)| { - TicketsData::::set(id, Some(body.clone())); - *id - }) - .collect(); - let segment = BoundedVec::truncate_from(segment); - UnsortedSegments::::insert(chunk_id as u32, segment); - } - - // Update metadata - let mut meta = TicketsMeta::::get(); - meta.unsorted_tickets_count = tickets_count; - TicketsMeta::::set(meta); - - log::debug!(target: LOG_TARGET, "Before sort: {:?}", meta); - #[block] - { - Pallet::::sort_segments(u32::MAX, 0, &mut meta); + let _ = RingContext::::get().unwrap(); } - log::debug!(target: LOG_TARGET, "After sort: {:?}", meta); } } diff --git a/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin b/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin deleted file mode 100644 index 6e81f216455a..000000000000 Binary files a/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin and /dev/null differ diff --git a/substrate/frame/sassafras/src/data/benchmark-results.md b/substrate/frame/sassafras/src/data/benchmark-results.md deleted file mode 100644 index 8682f96cbe5a..000000000000 --- a/substrate/frame/sassafras/src/data/benchmark-results.md +++ /dev/null @@ -1,99 +0,0 @@ -# Benchmarks High Level Results - -- **Ring size**: the actual number of validators for an epoch -- **Domain size**: a value which bounds the max size of the ring (max_ring_size = domain_size - 256) - -## Verify Submitted Tickets (extrinsic) - -`x` = Number of tickets - -### Domain=1024, Uncompressed (~ 13 ms + 11·x ms) - - Time ~= 13400 - + x 11390 - µs - -### Domain=1024, Compressed (~ 13 ms + 11·x ms) - - Time ~= 13120 - + x 11370 - µs - -### Domain=2048, Uncompressed (~ 26 ms + 11·x ms) - - Time ~= 26210 - + x 11440 - µs - -### Domain=2048, Compressed (~ 26 ms + 11·x ms) - - Time ~= 26250 - + x 11460 - µs - -### Conclusions - -- Verification doesn't depend on ring size as verification key is already constructed. -- The call is fast as far as the max number of tickets which can be submitted in one shot - is appropriately bounded. -- Currently, the bound is set equal epoch length, which iirc for Polkadot is 3600. - In this case if all the tickets are submitted in one shot timing is expected to be - ~39 seconds, which is not acceptable. TODO: find a sensible bound - ---- - -## Recompute Ring Verifier Key (on epoch change) - -`x` = Ring size - -### Domain=1024, Uncompressed (~ 50 ms) - - Time ~= 54070 - + x 98.53 - µs - -### Domain=1024, Compressed (~ 700 ms) - - Time ~= 733700 - + x 90.49 - µs - -### Domain=2048, Uncompressed (~ 100 ms) - - Time ~= 107700 - + x 108.5 - µs - -### Domain=2048, Compressed (~ 1.5 s) - - Time ~= 1462400 - + x 65.14 - µs - -### Conclusions - -- Here we load the full ring context data to recompute verification key for the epoch -- Ring size influence is marginal (e.g. 
for 1500 validators → ~98 ms to be added to the base time) -- This step is performed at most once per epoch (if validator set changes). -- Domain size for ring context influence the PoV size (see next paragraph) -- Decompression heavily influence timings (1.5sec vs 100ms for same domain size) - ---- - -## Ring Context Data Size - -### Domain=1024, Uncompressed - - 295412 bytes = ~ 300 KiB - -### Domain=1024, Compressed - - 147716 bytes = ~ 150 KiB - -### Domain=2048, Uncompressed - - 590324 bytes = ~ 590 KiB - -### Domain=2048, Compressed - - 295172 bytes = ~ 300 KiB diff --git a/substrate/frame/sassafras/src/data/tickets-sort.md b/substrate/frame/sassafras/src/data/tickets-sort.md deleted file mode 100644 index 64fc45e4fb00..000000000000 --- a/substrate/frame/sassafras/src/data/tickets-sort.md +++ /dev/null @@ -1,274 +0,0 @@ -# Segments Incremental Sorting Strategy Empirical Results - -Parameters: -- 128 segments -- segment max length 128 -- 32767 random tickets ids -- epoch length 3600 (== max tickets to keep) - -The table shows the comparison between the segments left in the unsorted segments buffer -and the number of new tickets which are added from the last segment to the sorted tickets -buffer (i.e. how many tickets we retain from the last processed segment) - -| Segments Left | Tickets Pushed | -|-----|-----| -| 255 | 128 | -| 254 | 128 | -| 253 | 128 | -| 252 | 128 | -| 251 | 128 | -| 250 | 128 | -| 249 | 128 | -| 248 | 128 | -| 247 | 128 | -| 246 | 128 | -| 245 | 128 | -| 244 | 128 | -| 243 | 128 | -| 242 | 128 | -| 241 | 128 | -| 240 | 128 | -| 239 | 128 | -| 238 | 128 | -| 237 | 128 | -| 236 | 128 | -| 235 | 128 | -| 234 | 128 | -| 233 | 128 | -| 232 | 128 | -| 231 | 128 | -| 230 | 128 | -| 229 | 128 | -| 228 | 128 | -| 227 | 128 | -| 226 | 126 | -| 225 | 117 | -| 224 | 120 | -| 223 | 110 | -| 222 | 110 | -| 221 | 102 | -| 220 | 107 | -| 219 | 96 | -| 218 | 105 | -| 217 | 92 | -| 216 | 91 | -| 215 | 85 | -| 214 | 84 | -| 213 | 88 | -| 212 | 77 | -| 211 | 86 | -| 210 | 73 | -| 209 | 73 | -| 208 | 81 | -| 207 | 83 | -| 206 | 70 | -| 205 | 84 | -| 204 | 71 | -| 203 | 63 | -| 202 | 60 | -| 201 | 53 | -| 200 | 73 | -| 199 | 55 | -| 198 | 65 | -| 197 | 62 | -| 196 | 55 | -| 195 | 63 | -| 194 | 61 | -| 193 | 48 | -| 192 | 67 | -| 191 | 61 | -| 190 | 55 | -| 189 | 49 | -| 188 | 60 | -| 187 | 49 | -| 186 | 51 | -| 185 | 53 | -| 184 | 47 | -| 183 | 51 | -| 182 | 51 | -| 181 | 53 | -| 180 | 42 | -| 179 | 43 | -| 178 | 48 | -| 177 | 46 | -| 176 | 39 | -| 175 | 54 | -| 174 | 39 | -| 173 | 44 | -| 172 | 51 | -| 171 | 49 | -| 170 | 48 | -| 169 | 48 | -| 168 | 41 | -| 167 | 39 | -| 166 | 41 | -| 165 | 40 | -| 164 | 43 | -| 163 | 53 | -| 162 | 51 | -| 161 | 36 | -| 160 | 45 | -| 159 | 40 | -| 158 | 29 | -| 157 | 37 | -| 156 | 31 | -| 155 | 38 | -| 154 | 31 | -| 153 | 38 | -| 152 | 39 | -| 151 | 30 | -| 150 | 37 | -| 149 | 42 | -| 148 | 35 | -| 147 | 33 | -| 146 | 35 | -| 145 | 37 | -| 144 | 38 | -| 143 | 31 | -| 142 | 38 | -| 141 | 38 | -| 140 | 27 | -| 139 | 31 | -| 138 | 25 | -| 137 | 31 | -| 136 | 26 | -| 135 | 30 | -| 134 | 31 | -| 133 | 37 | -| 132 | 29 | -| 131 | 24 | -| 130 | 31 | -| 129 | 34 | -| 128 | 31 | -| 127 | 28 | -| 126 | 28 | -| 125 | 19 | -| 124 | 27 | -| 123 | 29 | -| 122 | 36 | -| 121 | 32 | -| 120 | 29 | -| 119 | 28 | -| 118 | 33 | -| 117 | 18 | -| 116 | 28 | -| 115 | 27 | -| 114 | 28 | -| 113 | 21 | -| 112 | 23 | -| 111 | 19 | -| 110 | 21 | -| 109 | 20 | -| 108 | 26 | -| 107 | 23 | -| 106 | 30 | -| 105 | 31 | -| 104 | 19 | -| 103 | 25 | -| 102 | 23 | -| 101 | 29 | -| 100 | 
18 | -| 99 | 19 | -| 98 | 20 | -| 97 | 21 | -| 96 | 23 | -| 95 | 20 | -| 94 | 27 | -| 93 | 20 | -| 92 | 22 | -| 91 | 23 | -| 90 | 23 | -| 89 | 20 | -| 88 | 15 | -| 87 | 17 | -| 86 | 28 | -| 85 | 25 | -| 84 | 10 | -| 83 | 20 | -| 82 | 23 | -| 81 | 28 | -| 80 | 17 | -| 79 | 23 | -| 78 | 24 | -| 77 | 22 | -| 76 | 18 | -| 75 | 25 | -| 74 | 31 | -| 73 | 27 | -| 72 | 19 | -| 71 | 13 | -| 70 | 17 | -| 69 | 24 | -| 68 | 20 | -| 67 | 12 | -| 66 | 17 | -| 65 | 16 | -| 64 | 26 | -| 63 | 24 | -| 62 | 12 | -| 61 | 19 | -| 60 | 18 | -| 59 | 20 | -| 58 | 18 | -| 57 | 12 | -| 56 | 15 | -| 55 | 17 | -| 54 | 14 | -| 53 | 25 | -| 52 | 22 | -| 51 | 15 | -| 50 | 17 | -| 49 | 15 | -| 48 | 17 | -| 47 | 18 | -| 46 | 17 | -| 45 | 23 | -| 44 | 17 | -| 43 | 13 | -| 42 | 15 | -| 41 | 18 | -| 40 | 11 | -| 39 | 19 | -| 38 | 18 | -| 37 | 12 | -| 36 | 19 | -| 35 | 18 | -| 34 | 15 | -| 33 | 12 | -| 32 | 25 | -| 31 | 20 | -| 30 | 24 | -| 29 | 20 | -| 28 | 10 | -| 27 | 15 | -| 26 | 16 | -| 25 | 15 | -| 24 | 15 | -| 23 | 13 | -| 22 | 12 | -| 21 | 14 | -| 20 | 19 | -| 19 | 17 | -| 18 | 17 | -| 17 | 18 | -| 16 | 15 | -| 15 | 13 | -| 14 | 11 | -| 13 | 16 | -| 12 | 13 | -| 11 | 18 | -| 10 | 19 | -| 9 | 10 | -| 8 | 7 | -| 7 | 15 | -| 6 | 12 | -| 5 | 12 | -| 4 | 17 | -| 3 | 14 | -| 2 | 17 | -| 1 | 9 | -| 0 | 13 | - -# Graph of the same data - -![graph](tickets-sort.png) diff --git a/substrate/frame/sassafras/src/data/tickets-sort.png b/substrate/frame/sassafras/src/data/tickets-sort.png deleted file mode 100644 index b34ce3f37ba9..000000000000 Binary files a/substrate/frame/sassafras/src/data/tickets-sort.png and /dev/null differ diff --git a/substrate/frame/sassafras/src/data/tickets.bin b/substrate/frame/sassafras/src/data/tickets.bin new file mode 100644 index 000000000000..af6fd097b1b9 Binary files /dev/null and b/substrate/frame/sassafras/src/data/tickets.bin differ diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 285758afbe6d..1a21d8932093 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -21,28 +21,28 @@ //! is a constant-time block production protocol that aims to ensure that there is //! exactly one block produced with constant time intervals rather than multiple or none. //! -//! We run a lottery to distribute block production slots in an epoch and to fix the -//! order validators produce blocks in, by the beginning of an epoch. +//! We run a lottery to distribute block production slots for a *target* epoch and to fix +//! the order validators produce blocks in. //! -//! Each validator signs the same VRF input and publishes the output on-chain. This -//! value is their lottery ticket that can be validated against their public key. +//! Each validator signs some unbiasable VRF input and publishes the VRF output on-chain. +//! This value is their lottery ticket that can be eventually validated against their +//! public key. //! -//! We want to keep lottery winners secret, i.e. do not publish their public keys. -//! At the beginning of the epoch all the validators tickets are published but not -//! their public keys. +//! We want to keep lottery winners secret, i.e. do not disclose their public keys. +//! At the beginning of the *target* epoch all the validators tickets are published but +//! not the corresponding author public keys. //! -//! A valid tickets is validated when an honest validator reclaims it on block -//! production. +//! The association is revealed by the ticket's owner during block production when he will +//! 
claim his ticket, and thus the associated slot, by showing a proof which ships with the +//! produced block. //! -//! To prevent submission of fake tickets, resulting in empty slots, the validator -//! when submitting the ticket accompanies it with a SNARK of the statement: "Here's -//! my VRF output that has been generated using the given VRF input and my secret -//! key. I'm not telling you my keys, but my public key is among those of the -//! nominated validators", that is validated before the lottery. -//! -//! To anonymously publish the ticket to the chain a validator sends their tickets -//! to a random validator who later puts it on-chain as a transaction. +//! To prevent submission of invalid tickets, resulting in empty slots, the validator +//! when submitting a ticket accompanies it with a zk-SNARK of the statement: +//! "Here's my VRF output that has been generated using the given VRF input and my secret +//! key. I'm not telling you who I am, but my public key is among those of the nominated +//! validators for the target epoch". +#![allow(unused)] #![deny(warnings)] #![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] @@ -55,27 +55,24 @@ use scale_info::TypeInfo; use alloc::vec::Vec; use frame_support::{ - dispatch::{DispatchResultWithPostInfo, Pays}, - traits::{Defensive, Get}, - weights::Weight, - BoundedVec, WeakBoundedVec, -}; -use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, - pallet_prelude::BlockNumberFor, + dispatch::DispatchResult, traits::Get, weights::Weight, BoundedVec, WeakBoundedVec, }; +use frame_system::pallet_prelude::BlockNumberFor; use sp_consensus_sassafras::{ digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, - vrf, AuthorityId, Epoch, EpochConfiguration, Randomness, Slot, TicketBody, TicketEnvelope, - TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, + vrf, AuthorityId, Configuration, Epoch, InherentError, InherentType, Randomness, Slot, + TicketBody, TicketEnvelope, TicketId, INHERENT_IDENTIFIER, RANDOMNESS_LENGTH, + SASSAFRAS_ENGINE_ID, }; use sp_io::hashing; use sp_runtime::{ generic::DigestItem, traits::{One, Zero}, - BoundToRuntimeAppPublic, + BoundToRuntimeAppPublic, Percent, }; +pub use pallet::*; + #[cfg(feature = "runtime-benchmarks")] mod benchmarking; #[cfg(all(feature = "std", test))] @@ -86,37 +83,75 @@ mod tests; pub mod weights; pub use weights::WeightInfo; -pub use pallet::*; - const LOG_TARGET: &str = "sassafras::runtime"; // Contextual string used by the VRF to generate per-block randomness. const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasOnChainRandomness"; -// Max length for segments holding unsorted tickets. -const SEGMENT_MAX_SIZE: u32 = 128; +/// Randomness buffer. +pub type RandomnessBuffer = [Randomness; 4]; + +/// Number of tickets available for current and next epoch. +/// +/// These tickets are held by the [`Tickets`] storage map. +/// +/// Current counter index is computed as current epoch index modulo 2 +/// Next counter index is computed as the other entry. +pub type TicketsCounter = [u32; 2]; + +/// Ephemeral data constructed by `on_initialize` and destroyed by `on_finalize`. +/// +/// Contains some temporary data that may be useful later during code execution. +#[derive(Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct EphemeralData { + /// Previous block slot. + prev_slot: Slot, + /// Per block randomness to be deposited after block execution (on finalization). 
+ block_randomness: Randomness, +} + +/// Key used for the tickets accumulator map. +/// +/// Ticket keys are constructed by taking the bitwise negation of the ticket identifier. +/// As the tickets accumulator sorts entries according to the key values from smaller +/// to larger, we end up with a sequence of tickets identifiers sorted from larger to +/// smaller. +/// +/// This strategy comes handy when we quickly need to check if a new ticket chunk has been +/// completely absorbed by the accumulator, when this is already full and without loading +/// the whole sequence in memory. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, MaxEncodedLen, TypeInfo, +)] +pub struct TicketKey([u8; 32]); + +impl From for TicketKey { + fn from(mut value: TicketId) -> Self { + TicketKey(value.0.map(|b| !b)) + } +} -/// Authorities bounded vector convenience type. +/// Authorities sequence. pub type AuthoritiesVec = WeakBoundedVec::MaxAuthorities>; -/// Epoch length defined by the configuration. -pub type EpochLengthFor = ::EpochLength; +/// Tickets sequence. +pub type TicketsVec = BoundedVec::TicketsChunkLength>; -/// Tickets metadata. -#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] -pub struct TicketsMetadata { - /// Number of outstanding next epoch tickets requiring to be sorted. - /// - /// These tickets are held by the [`UnsortedSegments`] storage map in segments - /// containing at most `SEGMENT_MAX_SIZE` items. - pub unsorted_tickets_count: u32, +trait EpochTag { + fn tag(&self) -> u8; + fn next_tag(&self) -> u8; +} - /// Number of tickets available for current and next epoch. - /// - /// These tickets are held by the [`TicketsIds`] storage map. - /// - /// The array entry to be used for the current epoch is computed as epoch index modulo 2. - pub tickets_count: [u32; 2], +impl EpochTag for u64 { + #[inline(always)] + fn tag(&self) -> u8 { + (self % 2) as u8 + } + + #[inline(always)] + fn next_tag(&self) -> u8 { + self.tag() ^ 1 + } } #[frame_support::pallet] @@ -131,15 +166,37 @@ pub mod pallet { /// Configuration parameters. #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config { /// Amount of slots that each epoch should last. + /// + /// NOTE: Currently it is not possible to change the epoch duration after + /// the chain has started. Attempting to do so will brick block production. #[pallet::constant] - type EpochLength: Get; + type EpochDuration: Get; /// Max number of authorities allowed. #[pallet::constant] type MaxAuthorities: Get; + /// Redundancy factor + #[pallet::constant] + type RedundancyFactor: Get; + + /// Max attempts number + #[pallet::constant] + type AttemptsNumber: Get; + + /// Max number of tickets that can be submitted in one block. + #[pallet::constant] + type TicketsChunkLength: Get; + + /// Epoch lottery duration percent relative to the epoch `EpochDuration`. + /// + /// Tickets lottery starts with the start of an epoch. + /// When epoch lottery ends no more tickets are allowed to be submitted on-chain. + #[pallet::constant] + type LotteryDurationPercent: Get; + /// Epoch change trigger. /// /// Logic to be triggered on every block to query for whether an epoch has ended @@ -147,21 +204,32 @@ pub mod pallet { type EpochChangeTrigger: EpochChangeTrigger; /// Weight information for all calls of this pallet. - type WeightInfo: WeightInfo; + type WeightInfo: weights::WeightInfo; } /// Sassafras runtime errors. 
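+ /// Most of these variants are raised while validating the ticket envelopes carried by the `submit_tickets` inherent.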
#[pallet::error] pub enum Error { - /// Submitted configuration is invalid. - InvalidConfiguration, + /// Tickets were found after the lottery is over. + TicketUnexpected, + /// Ticket identifier is too big. + TicketOverThreshold, + /// Bad ticket order. + TicketBadOrder, + /// Invalid ticket signature. + TicketBadProof, + /// Invalid ticket attempt number. + TicketBadAttempt, + /// Some submitted ticket has not been persisted because of its score. + TicketDropped, + /// Duplicate ticket. + TicketDuplicate, + /// Invalid VRF output. + TicketBadVrfOutput, + /// Uninitialized Ring Verifier + TicketVerifierNotInitialized, } - /// Current epoch index. - #[pallet::storage] - #[pallet::getter(fn epoch_index)] - pub type EpochIndex = StorageValue<_, u64, ValueQuery>; - /// Current epoch authorities. #[pallet::storage] #[pallet::getter(fn authorities)] @@ -172,113 +240,62 @@ pub mod pallet { #[pallet::getter(fn next_authorities)] pub type NextAuthorities = StorageValue<_, AuthoritiesVec, ValueQuery>; - /// First block slot number. - /// - /// As the slots may not be zero-based, we record the slot value for the fist block. - /// This allows to always compute relative indices for epochs and slots. - #[pallet::storage] - #[pallet::getter(fn genesis_slot)] - pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; - /// Current block slot number. #[pallet::storage] #[pallet::getter(fn current_slot)] pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; - /// Current epoch randomness. + /// Randomness buffer. #[pallet::storage] - #[pallet::getter(fn randomness)] - pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; + #[pallet::getter(fn randomness_buf)] + pub type RandomnessBuf = StorageValue<_, RandomnessBuffer, ValueQuery>; - /// Next epoch randomness. + /// Tickets accumulator. #[pallet::storage] - #[pallet::getter(fn next_randomness)] - pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; + #[pallet::getter(fn tickets_accumulator)] + pub type TicketsAccumulator = CountedStorageMap<_, Identity, TicketKey, TicketBody>; - /// Randomness accumulator. - /// - /// Excluded the first imported block, its value is updated on block finalization. + /// Tickets counters for the current and next epoch. #[pallet::storage] - #[pallet::getter(fn randomness_accumulator)] - pub(crate) type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; + #[pallet::getter(fn tickets_count)] + pub type TicketsCount = StorageValue<_, TicketsCounter, ValueQuery>; - /// The configuration for the current epoch. - #[pallet::storage] - #[pallet::getter(fn config)] - pub type EpochConfig = StorageValue<_, EpochConfiguration, ValueQuery>; - - /// The configuration for the next epoch. - #[pallet::storage] - #[pallet::getter(fn next_config)] - pub type NextEpochConfig = StorageValue<_, EpochConfiguration>; - - /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next - /// epoch is enacted. + /// Tickets map. /// - /// In other words, a configuration change submitted during epoch N will be enacted on epoch - /// N+2. This is to maintain coherence for already submitted tickets for epoch N+1 that where - /// computed using configuration parameters stored for epoch N+1. - #[pallet::storage] - pub type PendingEpochConfigChange = StorageValue<_, EpochConfiguration>; - - /// Stored tickets metadata. - #[pallet::storage] - pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; - - /// Tickets identifiers map. 
- /// - /// The map holds tickets ids for the current and next epoch. + /// The map holds tickets identifiers for the current and next epoch. /// /// The key is a tuple composed by: - /// - `u8` equal to epoch's index modulo 2; - /// - `u32` equal to the ticket's index in a sorted list of epoch's tickets. - /// - /// Epoch X first N-th ticket has key (X mod 2, N) - /// - /// Note that the ticket's index doesn't directly correspond to the slot index within the epoch. - /// The assignment is computed dynamically using an *outside-in* strategy. + /// - `u8`: equal to epoch's index modulo 2; + /// - `u32` equal to the ticket's index in an abstract sorted sequence of epoch's tickets. /// - /// Be aware that entries within this map are never removed, only overwritten. - /// Last element index should be fetched from the [`TicketsMeta`] value. - #[pallet::storage] - pub type TicketsIds = StorageMap<_, Identity, (u8, u32), TicketId>; - - /// Tickets to be used for current and next epoch. - #[pallet::storage] - pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody>; - - /// Next epoch tickets unsorted segments. + /// For example, the key for the `N`-th ticket for epoch `E` is `(E mod 2, N)` /// - /// Contains lists of tickets where each list represents a batch of tickets - /// received via the `submit_tickets` extrinsic. + /// Note that the ticket's index `N` doesn't correspond to the offset of the associated + /// slot within the epoch. The assignment is computed using an *outside-in* strategy + /// and correctly returned by the [`slot_ticket`] method. /// - /// Each segment has max length [`SEGMENT_MAX_SIZE`]. - #[pallet::storage] - pub type UnsortedSegments = - StorageMap<_, Identity, u32, BoundedVec>, ValueQuery>; - - /// The most recently set of tickets which are candidates to become the next - /// epoch tickets. + /// Be aware that entries within this map are never removed, but only overwritten. + /// The number of tickets available for epoch `E` is stored in the `E mod 2` entry + /// of [`TicketsCount`]. #[pallet::storage] - pub type SortedCandidates = - StorageValue<_, BoundedVec>, ValueQuery>; + #[pallet::getter(fn tickets)] + pub type Tickets = StorageMap<_, Identity, (u8, u32), TicketBody>; /// Parameters used to construct the epoch's ring verifier. /// - /// In practice: Updatable Universal Reference String and the seed. + /// In practice, this is the SNARK "Universal Reference String" (powers of tau). #[pallet::storage] #[pallet::getter(fn ring_context)] pub type RingContext = StorageValue<_, vrf::RingContext>; /// Ring verifier data for the current epoch. #[pallet::storage] - pub type RingVerifierData = StorageValue<_, vrf::RingVerifierData>; + #[pallet::getter(fn ring_verifier_key)] + pub type RingVerifierKey = StorageValue<_, vrf::RingVerifierKey>; - /// Slot claim VRF pre-output used to generate per-slot randomness. - /// - /// The value is ephemeral and is cleared on block finalization. + /// Ephemeral data we retain until block finalization. #[pallet::storage] - pub(crate) type ClaimTemporaryData = StorageValue<_, vrf::VrfPreOutput>; + pub(crate) type TemporaryData = StorageValue<_, EphemeralData>; /// Genesis configuration for Sassafras protocol. #[pallet::genesis_config] @@ -286,8 +303,6 @@ pub mod pallet { pub struct GenesisConfig { /// Genesis authorities. pub authorities: Vec, - /// Genesis epoch configuration. 
- pub epoch_config: EpochConfiguration, /// Phantom config #[serde(skip)] pub _phantom: core::marker::PhantomData, @@ -296,7 +311,6 @@ pub mod pallet { #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - EpochConfig::::put(self.epoch_config); Pallet::::genesis_authorities_initialize(&self.authorities); #[cfg(feature = "construct-dummy-ring-context")] @@ -320,59 +334,61 @@ pub mod pallet { .find_map(|item| item.pre_runtime_try_to::(&SASSAFRAS_ENGINE_ID)) .expect("Valid block must have a slot claim. qed"); + let randomness_accumulator = Self::randomness_accumulator(); + let randomness_input = vrf::block_randomness_input(&randomness_accumulator, claim.slot); + + // Verification has already been done by the host + debug_assert!({ + use sp_core::crypto::{VrfPublic, Wraps}; + let authorities = Authorities::::get(); + let public = authorities + .get(claim.authority_idx as usize) + .expect("Bad authority index in claim"); + let data = vrf::block_randomness_sign_data(&randomness_accumulator, claim.slot); + public.as_inner_ref().vrf_verify(&data, &claim.vrf_signature) + }); + + let block_randomness = claim.vrf_signature.pre_outputs[0] + .make_bytes::(RANDOMNESS_VRF_CONTEXT, &randomness_input); + + TemporaryData::::put(EphemeralData { + prev_slot: CurrentSlot::::get(), + block_randomness, + }); + CurrentSlot::::put(claim.slot); if block_num == One::one() { - Self::post_genesis_initialize(claim.slot); + Self::post_genesis_initialize(); } - let randomness_pre_output = claim - .vrf_signature - .pre_outputs - .get(0) - .expect("Valid claim must have VRF signature; qed"); - ClaimTemporaryData::::put(randomness_pre_output); - let trigger_weight = T::EpochChangeTrigger::trigger::(block_num); T::WeightInfo::on_initialize() + trigger_weight } fn on_finalize(_: BlockNumberFor) { - // At the end of the block, we can safely include the current slot randomness + // At the end of the block, we can safely include the current block randomness // to the accumulator. If we've determined that this block was the first in // a new epoch, the changeover logic has already occurred at this point // (i.e. `enact_epoch_change` has already been called). - let randomness_input = vrf::slot_claim_input( - &Self::randomness(), - CurrentSlot::::get(), - EpochIndex::::get(), - ); - let randomness_pre_output = ClaimTemporaryData::::take() - .expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed"); - let randomness = randomness_pre_output - .make_bytes::(RANDOMNESS_VRF_CONTEXT, &randomness_input); - Self::deposit_slot_randomness(&randomness); - - // Check if we are in the epoch's second half. - // If so, start sorting the next epoch tickets. - let epoch_length = T::EpochLength::get(); + let block_randomness = TemporaryData::::take() + .expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed") + .block_randomness; + Self::deposit_randomness(block_randomness); + + // Check if tickets lottery is over, and if so, start sorting the next epoch tickets. 
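+ // The accumulator is drained at a rate of `ceil(outstanding / slots_left)` entries per block, so that, assuming roughly one block per remaining slot, the whole set of next-epoch tickets has been moved into the `Tickets` map by the time the epoch ends.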
+ let epoch_duration = T::EpochDuration::get(); + let lottery_over_idx = T::LotteryDurationPercent::get() * epoch_duration; let current_slot_idx = Self::current_slot_index(); - if current_slot_idx >= epoch_length / 2 { - let mut metadata = TicketsMeta::::get(); - if metadata.unsorted_tickets_count != 0 { - let next_epoch_idx = EpochIndex::::get() + 1; - let next_epoch_tag = (next_epoch_idx & 1) as u8; - let slots_left = epoch_length.checked_sub(current_slot_idx).unwrap_or(1); - Self::sort_segments( - metadata - .unsorted_tickets_count - .div_ceil(SEGMENT_MAX_SIZE * slots_left as u32), - next_epoch_tag, - &mut metadata, - ); - TicketsMeta::::set(metadata); + let mut outstanding_count = TicketsAccumulator::::count() as usize; + if current_slot_idx >= lottery_over_idx && outstanding_count != 0 { + let slots_left = epoch_duration.checked_sub(current_slot_idx).unwrap_or(1); + if slots_left > 0 { + outstanding_count = outstanding_count.div_ceil(slots_left as usize); } + let next_epoch_tag = Self::current_epoch_index().next_tag(); + Self::consume_tickets_accumulator(outstanding_count, next_epoch_tag); } } } @@ -380,201 +396,110 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Submit next epoch tickets candidates. - /// - /// The number of tickets allowed to be submitted in one call is equal to the epoch length. #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::submit_tickets(tickets.len() as u32))] - pub fn submit_tickets( - origin: OriginFor, - tickets: BoundedVec>, - ) -> DispatchResultWithPostInfo { + #[pallet::weight(( + T::WeightInfo::submit_tickets(envelopes.len() as u32), + DispatchClass::Mandatory + ))] + pub fn submit_tickets(origin: OriginFor, envelopes: TicketsVec) -> DispatchResult { ensure_none(origin)?; - debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); + debug!(target: LOG_TARGET, "Received {} tickets", envelopes.len()); - let epoch_length = T::EpochLength::get(); + let epoch_duration = T::EpochDuration::get(); let current_slot_idx = Self::current_slot_index(); - if current_slot_idx > epoch_length / 2 { - warn!(target: LOG_TARGET, "Tickets shall be submitted in the first epoch half",); - return Err("Tickets shall be submitted in the first epoch half".into()) + let lottery_over_idx = T::LotteryDurationPercent::get() * epoch_duration; + + if current_slot_idx >= lottery_over_idx { + warn!(target: LOG_TARGET, "Lottery is over, tickets must be submitted before slot index {}", lottery_over_idx); + return Err(Error::::TicketUnexpected.into()) } - let Some(verifier) = RingVerifierData::::get().map(|v| v.into()) else { + let Some(verifier) = RingVerifierKey::::get().map(|v| v.into()) else { warn!(target: LOG_TARGET, "Ring verifier key not initialized"); - return Err("Ring verifier key not initialized".into()) + return Err(Error::::TicketVerifierNotInitialized.into()) }; - let next_authorities = Self::next_authorities(); + // Get next epoch parameters + let randomness = Self::next_randomness(); + let authorities = Self::next_authorities(); // Compute tickets threshold - let next_config = Self::next_config().unwrap_or_else(|| Self::config()); let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold( - next_config.redundancy_factor, - epoch_length as u32, - next_config.attempts_number, - next_authorities.len() as u32, + epoch_duration as u32, + authorities.len() as u32, + T::AttemptsNumber::get(), + T::RedundancyFactor::get(), ); - // Get next epoch params - let randomness = NextRandomness::::get(); - let epoch_idx = EpochIndex::::get() + 1; + let 
attempts_num = T::AttemptsNumber::get(); - let mut valid_tickets = BoundedVec::with_bounded_capacity(tickets.len()); - - for ticket in tickets { - debug!(target: LOG_TARGET, "Checking ring proof"); + let mut candidates = Vec::new(); + for envelope in envelopes { + if envelope.attempt >= attempts_num { + debug!(target: LOG_TARGET, "Bad ticket attempt"); + return Err(Error::::TicketBadAttempt.into()) + } - let Some(ticket_id_pre_output) = ticket.signature.pre_outputs.get(0) else { + let Some(ticket_id_pre_output) = envelope.signature.pre_outputs.get(0) else { debug!(target: LOG_TARGET, "Missing ticket VRF pre-output from ring signature"); - continue + return Err(Error::::TicketBadVrfOutput.into()) }; - let ticket_id_input = - vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx); + let ticket_id_input = vrf::ticket_id_input(&randomness, envelope.attempt); // Check threshold constraint let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_pre_output); - if ticket_id >= ticket_threshold { - debug!(target: LOG_TARGET, "Ignoring ticket over threshold ({:032x} >= {:032x})", ticket_id, ticket_threshold); - continue - } + trace!(target: LOG_TARGET, "Checking ticket {:?}", ticket_id); - // Check for duplicates - if TicketsData::::contains_key(ticket_id) { - debug!(target: LOG_TARGET, "Ignoring duplicate ticket ({:032x})", ticket_id); - continue + if ticket_id >= ticket_threshold { + debug!(target: LOG_TARGET, "Ticket over threshold ({:?} >= {:?})", ticket_id, ticket_threshold); + return Err(Error::::TicketOverThreshold.into()) } // Check ring signature - let sign_data = vrf::ticket_body_sign_data(&ticket.body, ticket_id_input); - if !ticket.signature.ring_vrf_verify(&sign_data, &verifier) { - debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:032x})", ticket_id); - continue + let sign_data = vrf::ticket_id_sign_data(ticket_id_input, &envelope.extra); + if !envelope.signature.ring_vrf_verify(&sign_data, &verifier) { + debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:?})", ticket_id); + return Err(Error::::TicketBadProof.into()) } - if let Ok(_) = valid_tickets.try_push(ticket_id).defensive_proof( - "Input segment has same length as bounded destination vector; qed", - ) { - TicketsData::::set(ticket_id, Some(ticket.body)); - } - } - - if !valid_tickets.is_empty() { - Self::append_tickets(valid_tickets); + candidates.push(TicketBody { + id: ticket_id, + attempt: envelope.attempt, + extra: envelope.extra, + }); } - Ok(Pays::No.into()) - } + Self::deposit_tickets(candidates)?; - /// Plan an epoch configuration change. - /// - /// The epoch configuration change is recorded and will be announced at the beginning - /// of the next epoch together with next epoch authorities information. - /// In other words, the configuration will be enacted one epoch later. - /// - /// Multiple calls to this method will replace any existing planned config change - /// that has not been enacted yet. 
- #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::plan_config_change())] - pub fn plan_config_change( - origin: OriginFor, - config: EpochConfiguration, - ) -> DispatchResult { - ensure_root(origin)?; - - ensure!( - config.redundancy_factor != 0 && config.attempts_number != 0, - Error::::InvalidConfiguration - ); - PendingEpochConfigChange::::put(config); Ok(()) } } - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { + #[pallet::inherent] + impl ProvideInherent for Pallet { type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - let Call::submit_tickets { tickets } = call else { - return InvalidTransaction::Call.into() - }; - - // Discard tickets not coming from the local node or that are not included in a block - if source == TransactionSource::External { - warn!( - target: LOG_TARGET, - "Rejecting unsigned `submit_tickets` transaction from external source", - ); - return InvalidTransaction::BadSigner.into() - } + fn create_inherent(data: &InherentData) -> Option { + let envelopes = data + .get_data::(&INHERENT_IDENTIFIER) + .expect("Sassafras inherent data not correctly encoded") + .expect("Sassafras inherent data must be provided"); - // Current slot should be less than half of epoch length. - let epoch_length = T::EpochLength::get(); - let current_slot_idx = Self::current_slot_index(); - if current_slot_idx > epoch_length / 2 { - warn!(target: LOG_TARGET, "Tickets shall be proposed in the first epoch half",); - return InvalidTransaction::Stale.into() - } - - // This should be set such that it is discarded after the first epoch half - let tickets_longevity = epoch_length / 2 - current_slot_idx; - let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes)); + let envelopes = BoundedVec::truncate_from(envelopes); + Some(Call::submit_tickets { envelopes }) + } - ValidTransaction::with_tag_prefix("Sassafras") - .priority(TransactionPriority::max_value()) - .longevity(tickets_longevity as u64) - .and_provides(tickets_tag) - .propagate(true) - .build() + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::submit_tickets { .. }) } } } // Inherent methods impl Pallet { - /// Determine whether an epoch change should take place at this block. - /// - /// Assumes that initialization has already taken place. - pub(crate) fn should_end_epoch(block_num: BlockNumberFor) -> bool { - // The epoch has technically ended during the passage of time between this block and the - // last, but we have to "end" the epoch now, since there is no earlier possible block we - // could have done it. - // - // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having - // started at the slot of block 1. We want to use the same randomness and validator set as - // signalled in the genesis, so we don't rotate the epoch. - block_num > One::one() && Self::current_slot_index() >= T::EpochLength::get() - } - - /// Current slot index relative to the current epoch. - fn current_slot_index() -> u32 { - Self::slot_index(CurrentSlot::::get()) - } - - /// Slot index relative to the current epoch. - fn slot_index(slot: Slot) -> u32 { - slot.checked_sub(*Self::current_epoch_start()) - .and_then(|v| v.try_into().ok()) - .unwrap_or(u32::MAX) - } - - /// Finds the start slot of the current epoch. 
- /// - /// Only guaranteed to give correct results after `initialize` of the first - /// block in the chain (as its result is based off of `GenesisSlot`). - fn current_epoch_start() -> Slot { - Self::epoch_start(EpochIndex::::get()) - } - - /// Get the epoch's first slot. - fn epoch_start(epoch_index: u64) -> Slot { - const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ - if u64 is not enough we should crash for safety; qed."; - - let epoch_start = epoch_index.checked_mul(T::EpochLength::get() as u64).expect(PROOF); - GenesisSlot::::get().checked_add(epoch_start).expect(PROOF).into() - } - pub(crate) fn update_ring_verifier(authorities: &[AuthorityId]) { debug!(target: LOG_TARGET, "Loading ring context"); let Some(ring_ctx) = RingContext::::get() else { @@ -585,11 +510,15 @@ impl Pallet { let pks: Vec<_> = authorities.iter().map(|auth| *auth.as_ref()).collect(); debug!(target: LOG_TARGET, "Building ring verifier (ring size: {})", pks.len()); - let verifier_data = ring_ctx - .verifier_data(&pks) - .expect("Failed to build ring verifier. This is a bug"); - - RingVerifierData::::put(verifier_data); + let maybe_verifier_key = ring_ctx.verifier_key(&pks); + if maybe_verifier_key.is_none() { + error!( + target: LOG_TARGET, + "Failed to build verifier key. This should never happen,\n + falling back to AURA for next epoch as last resort" + ); + } + RingVerifierKey::::set(maybe_verifier_key); } /// Enact an epoch change. @@ -603,6 +532,8 @@ impl Pallet { authorities: WeakBoundedVec, next_authorities: WeakBoundedVec, ) { + debug_assert_eq!(authorities, NextAuthorities::::get()); + if next_authorities != authorities { Self::update_ring_verifier(&next_authorities); } @@ -612,13 +543,22 @@ impl Pallet { NextAuthorities::::put(&next_authorities); // Update epoch index - let mut epoch_idx = EpochIndex::::get() + 1; + let expected_epoch_idx = TemporaryData::::get() + .map(|cache| Self::epoch_index(cache.prev_slot) + 1) + .expect("Unconditionally populated in `on_initialize`; `enact_epoch_change` is always called after; qed"); + let mut epoch_idx = Self::current_epoch_index(); + + if epoch_idx < expected_epoch_idx { + panic!( + "Unexpected epoch value, expected: {} - found: {}, aborting", + expected_epoch_idx, epoch_idx + ); + } - let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); - if slot_idx >= T::EpochLength::get() { + if expected_epoch_idx != epoch_idx { // Detected one or more skipped epochs, clear tickets data and recompute epoch index. Self::reset_tickets_data(); - let skipped_epochs = *slot_idx / T::EpochLength::get() as u64; + let skipped_epochs = epoch_idx - expected_epoch_idx; epoch_idx += skipped_epochs; warn!( target: LOG_TARGET, @@ -628,90 +568,85 @@ impl Pallet { ); } - let mut metadata = TicketsMeta::::get(); - let mut metadata_dirty = false; - - EpochIndex::::put(epoch_idx); - - let next_epoch_idx = epoch_idx + 1; - - // Updates current epoch randomness and computes the *next* epoch randomness. - let next_randomness = Self::update_epoch_randomness(next_epoch_idx); - - if let Some(config) = NextEpochConfig::::take() { - EpochConfig::::put(config); - } - - let next_config = PendingEpochConfigChange::::take(); - if let Some(next_config) = next_config { - NextEpochConfig::::put(next_config); - } - // After we update the current epoch, we signal the *next* epoch change // so that nodes can track changes. 
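+ // The descriptor carries the randomness and the authority set against which tickets for the upcoming epoch are generated and verified.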
- let next_epoch = NextEpochDescriptor { - randomness: next_randomness, + let epoch_signal = NextEpochDescriptor { + randomness: Self::update_randomness_buffer(), authorities: next_authorities.into_inner(), - config: next_config, }; - Self::deposit_next_epoch_descriptor_digest(next_epoch); + Self::deposit_next_epoch_descriptor_digest(epoch_signal); - let epoch_tag = (epoch_idx & 1) as u8; + Self::consume_tickets_accumulator(usize::MAX, epoch_idx.tag()); - // Optionally finish sorting - if metadata.unsorted_tickets_count != 0 { - Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); - metadata_dirty = true; - } + // Reset next epoch counter as we're start accumulating. + let mut tickets_count = TicketsCount::::get(); + tickets_count[epoch_idx.next_tag() as usize] = 0; + TicketsCount::::set(tickets_count); + } - // Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies. - // Ids are left since are just cyclically overwritten on-the-go. - let prev_epoch_tag = epoch_tag ^ 1; - let prev_epoch_tickets_count = &mut metadata.tickets_count[prev_epoch_tag as usize]; - if *prev_epoch_tickets_count != 0 { - for idx in 0..*prev_epoch_tickets_count { - if let Some(ticket_id) = TicketsIds::::get((prev_epoch_tag, idx)) { - TicketsData::::remove(ticket_id); + pub(crate) fn deposit_tickets(tickets: Vec) -> Result<(), Error> { + let prev_count = TicketsAccumulator::::count(); + let mut prev_id = None; + for ticket in &tickets { + if prev_id.map(|prev| ticket.id <= prev).unwrap_or_default() { + return Err(Error::TicketBadOrder) + } + prev_id = Some(ticket.id); + TicketsAccumulator::::insert(TicketKey::from(ticket.id), ticket); + } + let count = TicketsAccumulator::::count(); + if count != prev_count + tickets.len() as u32 { + return Err(Error::TicketDuplicate) + } + let diff = count.saturating_sub(T::EpochDuration::get()); + if diff > 0 { + let dropped_entries: Vec<_> = + TicketsAccumulator::::iter().take(diff as usize).collect(); + // Assess that no new ticket has been dropped + for (key, ticket) in dropped_entries { + if tickets.binary_search_by_key(&ticket.id, |t| t.id).is_ok() { + return Err(Error::TicketDropped) } + TicketsAccumulator::::remove(key); } - *prev_epoch_tickets_count = 0; - metadata_dirty = true; } + Ok(()) + } - if metadata_dirty { - TicketsMeta::::set(metadata); + // Consumes the tickets accumulator relative to `epoch_tag` by depositing at most + // `max_items` into the `Tickets` map. Ticket bodies are stored in the `Tickets` + // map from smaller to bigger wrt ticket identifier (as required by the protocol). + fn consume_tickets_accumulator(max_items: usize, epoch_tag: u8) { + let mut tickets_count = TicketsCount::::get(); + let mut accumulator_count = TicketsAccumulator::::count(); + let mut idx = accumulator_count; + for (_, ticket) in TicketsAccumulator::::drain().take(max_items) { + idx -= 1; + Tickets::::insert((epoch_tag, idx), ticket); } + tickets_count[epoch_tag as usize] += (accumulator_count - idx); + TicketsCount::::set(tickets_count); } // Call this function on epoch change to enact current epoch randomness. - // - // Returns the next epoch randomness. 
- fn update_epoch_randomness(next_epoch_index: u64) -> Randomness { - let curr_epoch_randomness = NextRandomness::::get(); - CurrentRandomness::::put(curr_epoch_randomness); - - let accumulator = RandomnessAccumulator::::get(); - - let mut buf = [0; RANDOMNESS_LENGTH + 8]; - buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); - buf[RANDOMNESS_LENGTH..].copy_from_slice(&next_epoch_index.to_le_bytes()); - - let next_randomness = hashing::blake2_256(&buf); - NextRandomness::::put(&next_randomness); - - next_randomness + fn update_randomness_buffer() -> Randomness { + let mut randomness = RandomnessBuf::::get(); + randomness[3] = randomness[2]; + randomness[2] = randomness[1]; + randomness[1] = randomness[0]; + let announce = randomness[2]; + RandomnessBuf::::put(randomness); + announce } - // Deposit per-slot randomness. - fn deposit_slot_randomness(randomness: &Randomness) { - let accumulator = RandomnessAccumulator::::get(); - + // Deposit per-block randomness. + fn deposit_randomness(randomness: Randomness) { + let mut accumulator = RandomnessBuf::::get(); let mut buf = [0; 2 * RANDOMNESS_LENGTH]; - buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); + buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[0][..]); buf[RANDOMNESS_LENGTH..].copy_from_slice(&randomness[..]); - - let accumulator = hashing::blake2_256(&buf); - RandomnessAccumulator::::put(accumulator); + accumulator[0] = hashing::blake2_256(&buf); + RandomnessBuf::::put(accumulator); } // Deposit next epoch descriptor in the block header digest. @@ -746,65 +681,34 @@ impl Pallet { } // Method to be called on first block `on_initialize` to properly populate some key parameters. - fn post_genesis_initialize(slot: Slot) { - // Keep track of the actual first slot used (may not be zero based). - GenesisSlot::::put(slot); - - // Properly initialize randomness using genesis hash and current slot. - // This is important to guarantee that a different set of tickets are produced for: - // - different chains which share the same ring parameters and - // - same chain started with a different slot base. + fn post_genesis_initialize() { + // Properly initialize randomness using genesis hash. + // This is important to guarantee that a different set of tickets are produced for + // different chains sharing the same ring parameters. let genesis_hash = frame_system::Pallet::::parent_hash(); - let mut buf = genesis_hash.as_ref().to_vec(); - buf.extend_from_slice(&slot.to_le_bytes()); - let randomness = hashing::blake2_256(buf.as_slice()); - RandomnessAccumulator::::put(randomness); - - let next_randomness = Self::update_epoch_randomness(1); + let mut accumulator = RandomnessBuffer::default(); + accumulator[0] = hashing::blake2_256(genesis_hash.as_ref()); + accumulator[1] = hashing::blake2_256(&accumulator[0]); + accumulator[2] = hashing::blake2_256(&accumulator[1]); + accumulator[3] = hashing::blake2_256(&accumulator[2]); + RandomnessBuf::::put(accumulator); // Deposit a log as this is the first block in first epoch. let next_epoch = NextEpochDescriptor { - randomness: next_randomness, + randomness: accumulator[2], authorities: Self::next_authorities().into_inner(), - config: None, }; Self::deposit_next_epoch_descriptor_digest(next_epoch); } - /// Current epoch information. 
- pub fn current_epoch() -> Epoch { - let index = EpochIndex::<T>::get(); - Epoch { - index, - start: Self::epoch_start(index), - length: T::EpochLength::get(), - authorities: Self::authorities().into_inner(), - randomness: Self::randomness(), - config: Self::config(), - } - } - - /// Next epoch information. - pub fn next_epoch() -> Epoch { - let index = EpochIndex::<T>::get() + 1; - Epoch { - index, - start: Self::epoch_start(index), - length: T::EpochLength::get(), - authorities: Self::next_authorities().into_inner(), - randomness: Self::next_randomness(), - config: Self::next_config().unwrap_or_else(|| Self::config()), - } - } - /// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy. /// /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, /// with n >= k, then the tickets are assigned to the slots according to the following /// strategy: /// - /// slot-index : [ 0, 1, 2, ............ , n ] - /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// slot-index : [ 0, 1, 2, 3, ............... , n ] + /// tickets : [ t0, tk, t1, t_{k-1}, ... ]. /// /// With slot-index computed as `epoch_start() - slot`. /// /// If `slot` value falls within the next epoch then we fetch tickets from the next epoch /// tickets ids list. Note that in this case we may have not finished receiving all the tickets /// for that epoch yet. The next epoch tickets should be considered "stable" only after the - /// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`). + /// current epoch "submission period" is completed. /// /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the - /// specified slot-index (happens if a ticket falls in the middle of an epoch and n > k), - /// or if the slot falls beyond the next epoch. + /// specified slot-index (may happen if n > k and we are requesting a ticket for a slot with + /// relative index i > k) or if the slot falls beyond the next epoch. /// /// Before importing the first block this returns `None`. - pub fn slot_ticket_id(slot: Slot) -> Option { + pub fn slot_ticket(slot: Slot) -> Option<TicketBody> { if frame_system::Pallet::<T>::block_number().is_zero() { return None } - let epoch_idx = EpochIndex::<T>::get(); - let epoch_len = T::EpochLength::get(); - let mut slot_idx = Self::slot_index(slot); - let mut metadata = TicketsMeta::<T>::get(); - - let get_ticket_idx = |slot_idx| { - let ticket_idx = if slot_idx < epoch_len / 2 { - 2 * slot_idx + 1 - } else { - 2 * (epoch_len - (slot_idx + 1)) - }; - debug!( - target: LOG_TARGET, - "slot-idx {} <-> ticket-idx {}", - slot_idx, - ticket_idx - ); - ticket_idx as u32 - }; - let mut epoch_tag = (epoch_idx & 1) as u8; + let curr_epoch_idx = Self::current_epoch_index(); + let slot_epoch_idx = Self::epoch_index(slot); + if slot_epoch_idx < curr_epoch_idx || slot_epoch_idx > curr_epoch_idx + 1 { + return None + } - if epoch_len <= slot_idx && slot_idx < 2 * epoch_len { - // Try to get a ticket for the next epoch. Since its state values were not enacted yet, - // we may have to finish sorting the tickets.
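The outside-in strategy above reduces to a pure function over a slot index and a ticket count; the expected values below mirror the 8-ticket case exercised by the tests further down. This is a sketch of the index arithmetic only, detached from the pallet's storage and epoch-tag handling.

```rust
// Even slot indices walk the sorted ticket list from the front, odd ones from the back.
fn outside_in_idx(slot_idx: u32, tickets_count: u32) -> Option<u32> {
    if slot_idx >= tickets_count {
        // Slot not bound to a ticket (the n > k case).
        return None
    }
    let mut ticket_idx = slot_idx / 2;
    if slot_idx & 1 != 0 {
        ticket_idx = tickets_count - (ticket_idx + 1);
    }
    Some(ticket_idx)
}

fn main() {
    // With 8 tickets, slots are served as t0, t7, t1, t6, t2, t5, t3, t4 (cf. the tests).
    let expected: [u32; 8] = [0, 7, 1, 6, 2, 5, 3, 4];
    for (slot_idx, &ticket_idx) in expected.iter().enumerate() {
        assert_eq!(outside_in_idx(slot_idx as u32, 8), Some(ticket_idx));
    }
    // Slots beyond the number of available tickets get no ticket.
    assert_eq!(outside_in_idx(8, 8), None);
    assert_eq!(outside_in_idx(9, 8), None);
}
```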
- epoch_tag ^= 1; - slot_idx -= epoch_len; - if metadata.unsorted_tickets_count != 0 { - Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); - TicketsMeta::<T>::set(metadata); - } - } else if slot_idx >= 2 * epoch_len { + let mut epoch_tag = slot_epoch_idx.tag(); + let slot_idx = Self::slot_index(slot); + + if slot_epoch_idx == curr_epoch_idx + 1 && TicketsAccumulator::<T>::count() != 0 { + // JIT enactment of next epoch tickets when the accumulator has not been + // fully consumed yet. Drain and enact the accumulator for next epoch. + Self::consume_tickets_accumulator(usize::MAX, epoch_tag); + } + + let tickets_count = TicketsCount::<T>::get()[epoch_tag as usize]; + if tickets_count <= slot_idx { + // Slot not bound to a ticket. return None } - let ticket_idx = get_ticket_idx(slot_idx); - if ticket_idx < metadata.tickets_count[epoch_tag as usize] { - TicketsIds::<T>::get((epoch_tag, ticket_idx)) - } else { - None + // Outside-in sort. + let mut ticket_idx = slot_idx / 2; + if slot_idx & 1 != 0 { + ticket_idx = tickets_count - (ticket_idx + 1); } + + debug!( + target: LOG_TARGET, + "slot-idx {} <-> ticket-idx {}", + slot_idx, + ticket_idx + ); + Tickets::<T>::get((epoch_tag, ticket_idx)) } - /// Returns ticket id and data associated with the given `slot`. + /// Reset tickets-related data. /// - /// Refer to the `slot_ticket_id` documentation for the slot-ticket association - /// criteria. - pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> { - Self::slot_ticket_id(slot).and_then(|id| TicketsData::<T>::get(id).map(|body| (id, body))) + /// Optimization note: tickets are left in place, only the associated counters are reset. + #[inline(always)] + fn reset_tickets_data() { + TicketsCount::<T>::kill(); + let _ = TicketsAccumulator::<T>::clear(u32::MAX, None); + } + + /// Static protocol configuration. + #[inline(always)] + pub fn protocol_config() -> Configuration { + let epoch_duration = T::EpochDuration::get(); + let lottery_duration = T::LotteryDurationPercent::get() * epoch_duration; + Configuration { + epoch_duration, + lottery_duration, + max_authorities: T::MaxAuthorities::get(), + redundancy_factor: T::RedundancyFactor::get(), + attempts_number: T::AttemptsNumber::get(), + } } - // Sort and truncate candidate tickets, cleanup storage. - fn sort_and_truncate(candidates: &mut Vec, max_tickets: usize) -> u128 { - candidates.sort_unstable(); - candidates.drain(max_tickets..).for_each(TicketsData::<T>::remove); - candidates[max_tickets - 1] + /// Current epoch information. + #[inline(always)] + pub fn current_epoch() -> Epoch { + Epoch { + start: Self::current_epoch_start(), + authorities: Self::authorities().into_inner(), + randomness: Self::randomness_buf(), + } } - /// Sort the tickets which belong to the epoch with the specified `epoch_tag`. + /// Randomness buffer entries. /// - /// At most `max_segments` are taken from the `UnsortedSegments` structure. + /// Assuming we're executing a block during epoch with index `N`. /// - /// The tickets of the removed segments are merged with the tickets on the `SortedCandidates` - /// which is then sorted an truncated to contain at most `MaxTickets` entries. + /// Entries: + /// - 0 : randomness accumulator after execution of previous block. + /// - 1 : randomness accumulator snapshot after execution of epoch `N-1` last block. + /// - 2 : randomness accumulator snapshot after execution of epoch `N-2` last block. + /// - 3 : randomness accumulator snapshot after execution of epoch `N-3` last block.
/// - /// If all the entries in `UnsortedSegments` are consumed, then `SortedCandidates` is elected - /// as the next epoch tickets, else it is saved to be used by next calls of this function. - pub(crate) fn sort_segments(max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { - let unsorted_segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); - let max_segments = max_segments.min(unsorted_segments_count); - let max_tickets = Self::epoch_length() as usize; - - // Fetch the sorted candidates (if any). - let mut candidates = SortedCandidates::::take().into_inner(); - - // There is an upper bound to check only if we already sorted the max number - // of allowed tickets. - let mut upper_bound = *candidates.get(max_tickets - 1).unwrap_or(&TicketId::MAX); - - let mut require_sort = false; - - // Consume at most `max_segments` segments. - // During the process remove every stale ticket from `TicketsData` storage. - for segment_idx in (0..unsorted_segments_count).rev().take(max_segments as usize) { - let segment = UnsortedSegments::::take(segment_idx); - metadata.unsorted_tickets_count -= segment.len() as u32; - - // Push only ids with a value less than the current `upper_bound`. - let prev_len = candidates.len(); - for ticket_id in segment { - if ticket_id < upper_bound { - candidates.push(ticket_id); - } else { - TicketsData::::remove(ticket_id); - } - } - require_sort = candidates.len() != prev_len; - - // As we approach the tail of the segments buffer the `upper_bound` value is expected - // to decrease (fast). We thus expect the number of tickets pushed into the - // `candidates` vector to follow an exponential drop. - // - // Given this, sorting and truncating after processing each segment may be an overkill - // as we may find pushing few tickets more and more often. Is preferable to perform - // the sort and truncation operations only when we reach some bigger threshold - // (currently set as twice the capacity of `SortCandidate`). - // - // The more is the protocol's redundancy factor (i.e. the ratio between tickets allowed - // to be submitted and the epoch length) the more this check becomes relevant. - if candidates.len() > 2 * max_tickets { - upper_bound = Self::sort_and_truncate(&mut candidates, max_tickets); - require_sort = false; - } - } - - if candidates.len() > max_tickets { - Self::sort_and_truncate(&mut candidates, max_tickets); - } else if require_sort { - candidates.sort_unstable(); - } - - if metadata.unsorted_tickets_count == 0 { - // Sorting is over, write to next epoch map. - candidates.iter().enumerate().for_each(|(i, id)| { - TicketsIds::::insert((epoch_tag, i as u32), id); - }); - metadata.tickets_count[epoch_tag as usize] = candidates.len() as u32; - } else { - // Keep the partial result for the next calls. - SortedCandidates::::set(BoundedVec::truncate_from(candidates)); - } + /// The semantic of these entries is defined as: + /// - 3 : epoch `N` randomness + /// - 2 : epoch `N+1` randomness + /// - 1 : epoch `N+2` randomness + /// - 0 : accumulator for epoch `N+3` randomness + /// + /// If `index` is greater than 3 the `Default` is returned. + #[inline(always)] + fn randomness(index: usize) -> Randomness { + Self::randomness_buf().get(index).cloned().unwrap_or_default() } - /// Append a set of tickets to the segments map. 
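To make the entry semantics concrete, here is a small sketch (types and values are illustrative) of the resulting invariant: what `next_randomness()` (entry 2) reports during epoch `N` is exactly what `current_randomness()` (entry 3) reports once epoch `N+1` begins, because the epoch-change rotation shifts entry 2 into entry 3.

```rust
type Randomness = [u8; 32];
type RandomnessBuffer = [Randomness; 4];

fn rotate_on_epoch_change(buf: &mut RandomnessBuffer) {
    buf[3] = buf[2]; // epoch N+1 randomness becomes the new current entry
    buf[2] = buf[1]; // epoch N+2 randomness becomes the new next entry
    buf[1] = buf[0]; // the live accumulator is snapshotted
}

fn current_randomness(buf: &RandomnessBuffer) -> Randomness { buf[3] }
fn next_randomness(buf: &RandomnessBuffer) -> Randomness { buf[2] }

fn main() {
    // Arbitrary distinct values standing in for accumulated epoch randomness.
    let mut buf: RandomnessBuffer = [[10; 32], [11; 32], [12; 32], [13; 32]];

    let promised_for_next_epoch = next_randomness(&buf); // observed during epoch N
    rotate_on_epoch_change(&mut buf);                    // epoch N -> N+1 boundary
    assert_eq!(current_randomness(&buf), promised_for_next_epoch);
}
```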
- pub(crate) fn append_tickets(mut tickets: BoundedVec>) { - debug!(target: LOG_TARGET, "Appending batch with {} tickets", tickets.len()); - tickets.iter().for_each(|t| trace!(target: LOG_TARGET, " + {t:032x}")); - - let mut metadata = TicketsMeta::::get(); - let mut segment_idx = metadata.unsorted_tickets_count / SEGMENT_MAX_SIZE; - - while !tickets.is_empty() { - let rem = metadata.unsorted_tickets_count % SEGMENT_MAX_SIZE; - let to_be_added = tickets.len().min((SEGMENT_MAX_SIZE - rem) as usize); + /// Current epoch's randomness. + #[inline(always)] + fn current_randomness() -> Randomness { + Self::randomness(3) + } - let mut segment = UnsortedSegments::::get(segment_idx); - let _ = segment - .try_extend(tickets.drain(..to_be_added)) - .defensive_proof("We don't add more than `SEGMENT_MAX_SIZE` and this is the maximum bound for the vector."); - UnsortedSegments::::insert(segment_idx, segment); + /// Next epoch's randomness. + #[inline(always)] + fn next_randomness() -> Randomness { + Self::randomness(2) + } - metadata.unsorted_tickets_count += to_be_added as u32; - segment_idx += 1; - } + /// Randomness accumulator + #[inline(always)] + fn randomness_accumulator() -> Randomness { + Self::randomness(0) + } - TicketsMeta::::set(metadata); + /// Determine whether an epoch change should take place at this block. + #[inline(always)] + fn should_end_epoch(block_num: BlockNumberFor) -> bool { + Self::current_slot_index() == 0 && block_num != Zero::zero() } - /// Remove all tickets related data. - /// - /// May not be efficient as the calling places may repeat some of this operations - /// but is a very extraordinary operation (hopefully never happens in production) - /// and better safe than sorry. - fn reset_tickets_data() { - let metadata = TicketsMeta::::get(); + /// Current slot index relative to the current epoch. + #[inline(always)] + fn current_slot_index() -> u32 { + Self::slot_index(CurrentSlot::::get()) + } - // Remove even/odd-epoch data. - for epoch_tag in 0..=1 { - for idx in 0..metadata.tickets_count[epoch_tag] { - if let Some(id) = TicketsIds::::get((epoch_tag as u8, idx)) { - TicketsData::::remove(id); - } - } - } + /// Slot index relative to the current epoch. + #[inline(always)] + fn slot_index(slot: Slot) -> u32 { + (*slot % ::EpochDuration::get() as u64) as u32 + } - // Remove all unsorted tickets segments. - let segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); - (0..segments_count).for_each(UnsortedSegments::::remove); + /// Current epoch index. + #[inline(always)] + fn current_epoch_index() -> u64 { + Self::epoch_index(Self::current_slot()) + } - // Reset sorted candidates - SortedCandidates::::kill(); + /// Epoch's index from slot. + #[inline(always)] + fn epoch_index(slot: Slot) -> u64 { + *slot / ::EpochDuration::get() as u64 + } - // Reset tickets metadata - TicketsMeta::::kill(); + /// Get current epoch first slot. + #[inline(always)] + fn current_epoch_start() -> Slot { + let curr_slot = *Self::current_slot(); + let epoch_start = curr_slot - curr_slot % ::EpochDuration::get() as u64; + Slot::from(epoch_start) } - /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to - /// `submit_unsigned_transaction`. - /// - /// The submitted tickets are added to the next epoch outstanding tickets as long as the - /// extrinsic is called within the first half of the epoch. Tickets received during the - /// second half are dropped. 
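The slot/epoch helpers above are plain integer arithmetic on the absolute slot number. A stand-alone sketch with the mock's 10-slot epochs; the 101 → epoch 10 values match the `slot_and_epoch_helpers_works` test further down.

```rust
const EPOCH_DURATION: u64 = 10; // stand-in for T::EpochDuration

fn slot_index(slot: u64) -> u32 {
    (slot % EPOCH_DURATION) as u32
}

fn epoch_index(slot: u64) -> u64 {
    slot / EPOCH_DURATION
}

fn epoch_start(epoch_index: u64) -> u64 {
    epoch_index
        .checked_mul(EPOCH_DURATION)
        .expect("slot number is u64 and should not overflow")
}

fn current_epoch_start(slot: u64) -> u64 {
    slot - slot % EPOCH_DURATION
}

fn main() {
    // Slot 101 sits at index 1 of epoch 10, whose first slot is 100 (cf. the tests).
    assert_eq!(slot_index(101), 1);
    assert_eq!(epoch_index(101), 10);
    assert_eq!(current_epoch_start(101), 100);
    assert_eq!(epoch_start(epoch_index(101)), 100);
    // The next epoch starts at slot 110.
    assert_eq!(epoch_start(epoch_index(101) + 1), 110);
}
```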
- pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { - let tickets = BoundedVec::truncate_from(tickets); - let call = Call::submit_tickets { tickets }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(_) => true, - Err(e) => { - error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); - false - }, - } + /// Get the epoch's first slot. + #[inline(always)] + fn epoch_start(epoch_index: u64) -> Slot { + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + epoch_index.checked_mul(T::EpochDuration::get() as u64).expect(PROOF).into() } - /// Epoch length - pub fn epoch_length() -> u32 { - T::EpochLength::get() + /// Epoch duration. + #[inline(always)] + fn epoch_duration() -> u32 { + T::EpochDuration::get() } } @@ -1071,7 +928,7 @@ impl EpochChangeTrigger for EpochChangeInternalTrigger { let next_authorities = authorities.clone(); let len = next_authorities.len() as u32; Pallet::::enact_epoch_change(authorities, next_authorities); - T::WeightInfo::enact_epoch_change(len, T::EpochLength::get()) + T::WeightInfo::enact_epoch_change(len, T::EpochDuration::get()) } else { Weight::zero() } diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index f145bffa3a05..e260748ec6a1 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -20,17 +20,16 @@ use crate::{self as pallet_sassafras, EpochChangeInternalTrigger, *}; use frame_support::{ - derive_impl, - traits::{ConstU32, OnFinalize, OnInitialize}, + derive_impl, parameter_types, + traits::{ConstU32, ConstU8, OnFinalize, OnInitialize}, }; use sp_consensus_sassafras::{ digests::SlotClaim, vrf::{RingProver, VrfSignature}, - AuthorityIndex, AuthorityPair, EpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, + AuthorityIndex, AuthorityPair, Slot, TicketBody, TicketEnvelope, TicketId, }; use sp_core::{ - crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps}, - ed25519::Public as EphemeralPublic, + crypto::{ByteArray, Pair, VrfSecret, Wraps}, H256, U256, }; use sp_runtime::{ @@ -40,8 +39,13 @@ use sp_runtime::{ const LOG_TARGET: &str = "sassafras::tests"; -const EPOCH_LENGTH: u32 = 10; +// Configuration constants +const EPOCH_DURATION: u32 = 10; +const LOTTERY_PERCENT: u8 = 85; const MAX_AUTHORITIES: u32 = 100; +const REDUNDANCY_FACTOR: u8 = 32; +const ATTEMPTS_NUMBER: u8 = 2; +const TICKETS_CHUNK_LENGTH: u32 = 16; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { @@ -56,9 +60,17 @@ where type Extrinsic = TestXt; } +parameter_types! { + pub const LotteryPercent: Percent = Percent::from_percent(LOTTERY_PERCENT); +} + impl pallet_sassafras::Config for Test { - type EpochLength = ConstU32; + type EpochDuration = ConstU32; type MaxAuthorities = ConstU32; + type RedundancyFactor = ConstU8; + type AttemptsNumber = ConstU8; + type TicketsChunkLength = ConstU32; + type LotteryDurationPercent = LotteryPercent; type EpochChangeTrigger = EpochChangeInternalTrigger; type WeightInfo = (); } @@ -70,14 +82,6 @@ frame_support::construct_runtime!( } ); -// Default used for most of the tests. -// -// The redundancy factor has been set to max value to accept all submitted -// tickets without worrying about the threshold. 
-pub const TEST_EPOCH_CONFIGURATION: EpochConfiguration = - EpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 5 }; - -/// Build and returns test storage externalities pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { new_test_ext_with_pairs(authorities_len, false).1 } @@ -98,7 +102,6 @@ pub fn new_test_ext_with_pairs( pallet_sassafras::GenesisConfig:: { authorities: authorities.clone(), - epoch_config: TEST_EPOCH_CONFIGURATION, _phantom: core::marker::PhantomData, } .assimilate_storage(&mut storage) @@ -118,183 +121,68 @@ pub fn new_test_ext_with_pairs( (pairs, ext) } -fn make_ticket_with_prover( - attempt: u32, - pair: &AuthorityPair, - prover: &RingProver, -) -> TicketEnvelope { - log::debug!("attempt: {}", attempt); - - // Values are referring to the next epoch - let epoch = Sassafras::epoch_index() + 1; - let randomness = Sassafras::next_randomness(); - - // Make a dummy ephemeral public that hopefully is unique within one test instance. - // In the tests, the values within the erased public are just used to compare - // ticket bodies, so it is not important to be a valid key. - let mut raw: [u8; 32] = [0; 32]; - raw.copy_from_slice(&pair.public().as_slice()[0..32]); - let erased_public = EphemeralPublic::unchecked_from(raw); - let revealed_public = erased_public; - - let ticket_id_input = vrf::ticket_id_input(&randomness, attempt, epoch); - - let body = TicketBody { attempt_idx: attempt, erased_public, revealed_public }; - let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); - - let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); - - // Ticket-id can be generated via vrf-preout. - // We don't care that much about its value here. - TicketEnvelope { body, signature } +fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { + let randomness = Sassafras::randomness_accumulator(); + let data = vrf::block_randomness_sign_data(&randomness, slot); + pair.as_ref().vrf_sign(&data) } -pub fn make_prover(pair: &AuthorityPair) -> RingProver { - let public = pair.public(); - let mut prover_idx = None; - - let ring_ctx = Sassafras::ring_context().unwrap(); - - let pks: Vec = Sassafras::authorities() - .iter() - .enumerate() - .map(|(idx, auth)| { - if public == *auth { - prover_idx = Some(idx); - } - *auth.as_ref() - }) - .collect(); - - log::debug!("Building prover. Ring size: {}", pks.len()); - let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); - log::debug!("Done"); - - prover +/// Construct a `PreDigest` instance for the given parameters. +pub fn make_slot_claim( + authority_idx: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> SlotClaim { + let vrf_signature = slot_claim_vrf_signature(slot, pair); + SlotClaim { authority_idx, slot, vrf_signature } } -/// Construct `attempts` tickets envelopes for the next epoch. -/// -/// E.g. by passing an optional threshold -pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { - let prover = make_prover(pair); - (0..attempts) - .into_iter() - .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) - .collect() +/// Construct a `Digest` with a `SlotClaim` item. 
+pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { + let claim = make_slot_claim(authority_idx, slot, pair); + Digest { logs: vec![DigestItem::from(&claim)] } } -pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) { - // Values are referring to the next epoch - let epoch = Sassafras::epoch_index() + 1; +/// Make a ticket which is claimable during the next epoch. +pub fn make_ticket_body(attempt: u8, pair: &AuthorityPair) -> TicketBody { let randomness = Sassafras::next_randomness(); - let ticket_id_input = vrf::ticket_id_input(&randomness, attempt_idx, epoch); + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt); let ticket_id_pre_output = pair.as_inner_ref().vrf_pre_output(&ticket_id_input); let id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_pre_output); - // Make a dummy ephemeral public that hopefully is unique within one test instance. - // In the tests, the values within the erased public are just used to compare - // ticket bodies, so it is not important to be a valid key. - let mut raw: [u8; 32] = [0; 32]; - raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]); - raw[16..].copy_from_slice(&id.to_le_bytes()); - let erased_public = EphemeralPublic::unchecked_from(raw); - let revealed_public = erased_public; + // Make dummy extra data. + let mut extra = [pair.public().as_slice(), &id.0[..]].concat(); + let extra = BoundedVec::truncate_from(extra); - let body = TicketBody { attempt_idx, erased_public, revealed_public }; - - (id, body) + TicketBody { id, attempt, extra } } -pub fn make_dummy_ticket_body(attempt_idx: u32) -> (TicketId, TicketBody) { - let hash = sp_crypto_hashing::blake2_256(&attempt_idx.to_le_bytes()); - - let erased_public = EphemeralPublic::unchecked_from(hash); - let revealed_public = erased_public; - - let body = TicketBody { attempt_idx, erased_public, revealed_public }; - - let mut bytes = [0u8; 16]; - bytes.copy_from_slice(&hash[..16]); - let id = TicketId::from_le_bytes(bytes); - - (id, body) +pub fn make_dummy_ticket_body(attempt: u8) -> TicketBody { + let hash = sp_crypto_hashing::blake2_256(&[attempt]); + let id = TicketId(hash); + let hash = sp_crypto_hashing::blake2_256(&hash); + let extra = BoundedVec::truncate_from(hash.to_vec()); + TicketBody { id, attempt, extra } } pub fn make_ticket_bodies( - number: u32, + attempts: u8, pair: Option<&AuthorityPair>, -) -> Vec<(TicketId, TicketBody)> { - (0..number) + sort: bool, +) -> Vec { + let mut bodies: Vec<_> = (0..attempts) .into_iter() .map(|i| match pair { Some(pair) => make_ticket_body(i, pair), None => make_dummy_ticket_body(i), }) - .collect() -} - -/// Persist the given tickets in the unsorted segments buffer. -/// -/// This function skips all the checks performed by the `submit_tickets` extrinsic and -/// directly appends the tickets to the `UnsortedSegments` structure. -pub fn persist_next_epoch_tickets_as_segments(tickets: &[(TicketId, TicketBody)]) { - let mut ids = Vec::with_capacity(tickets.len()); - tickets.iter().for_each(|(id, body)| { - TicketsData::::set(id, Some(body.clone())); - ids.push(*id); - }); - let max_chunk_size = Sassafras::epoch_length() as usize; - ids.chunks(max_chunk_size).for_each(|chunk| { - Sassafras::append_tickets(BoundedVec::truncate_from(chunk.to_vec())); - }) -} - -/// Calls the [`persist_next_epoch_tickets_as_segments`] and then proceeds to the -/// sorting of the candidates. -/// -/// Only "winning" tickets are left. 
-pub fn persist_next_epoch_tickets(tickets: &[(TicketId, TicketBody)]) { - persist_next_epoch_tickets_as_segments(tickets); - // Force sorting of next epoch tickets (enactment) by explicitly querying the first of them. - let next_epoch = Sassafras::next_epoch(); - assert_eq!(TicketsMeta::::get().unsorted_tickets_count, tickets.len() as u32); - Sassafras::slot_ticket(next_epoch.start).unwrap(); - assert_eq!(TicketsMeta::::get().unsorted_tickets_count, 0); -} - -fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { - let mut epoch = Sassafras::epoch_index(); - let mut randomness = Sassafras::randomness(); - - // Check if epoch is going to change on initialization. - let epoch_start = Sassafras::current_epoch_start(); - let epoch_length = EPOCH_LENGTH.into(); - if epoch_start != 0_u64 && slot >= epoch_start + epoch_length { - epoch += slot.saturating_sub(epoch_start).saturating_div(epoch_length); - randomness = crate::NextRandomness::::get(); + .collect(); + if sort { + bodies.sort_unstable(); } - - let data = vrf::slot_claim_sign_data(&randomness, slot, epoch); - pair.as_ref().vrf_sign(&data) -} - -/// Construct a `PreDigest` instance for the given parameters. -pub fn make_slot_claim( - authority_idx: AuthorityIndex, - slot: Slot, - pair: &AuthorityPair, -) -> SlotClaim { - let vrf_signature = slot_claim_vrf_signature(slot, pair); - SlotClaim { authority_idx, slot, vrf_signature, ticket_claim: None } -} - -/// Construct a `Digest` with a `SlotClaim` item. -pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { - let claim = make_slot_claim(authority_idx, slot, pair); - Digest { logs: vec![DigestItem::from(&claim)] } + bodies } pub fn initialize_block( @@ -341,3 +229,59 @@ pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { } digest } + +fn make_ticket_with_prover( + attempt: u8, + pair: &AuthorityPair, + prover: &RingProver, +) -> (TicketId, TicketEnvelope) { + log::debug!("attempt: {}", attempt); + + // Values are referring to the next epoch + let randomness = Sassafras::next_randomness(); + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt); + let sign_data = vrf::ticket_id_sign_data(ticket_id_input.clone(), &[]); + let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); + let pre_output = &signature.pre_outputs[0]; + + let ticket_id = vrf::make_ticket_id(&ticket_id_input, pre_output); + let envelope = TicketEnvelope { attempt, extra: Default::default(), signature }; + + (ticket_id, envelope) +} + +pub fn make_prover(pair: &AuthorityPair) -> RingProver { + let public = pair.public(); + let mut prover_idx = None; + + let ring_ctx = Sassafras::ring_context().unwrap(); + + let pks: Vec = Sassafras::authorities() + .iter() + .enumerate() + .map(|(idx, auth)| { + if public == *auth { + prover_idx = Some(idx); + } + *auth.as_ref() + }) + .collect(); + + log::debug!("Building prover. Ring size: {}", pks.len()); + let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); + log::debug!("Done"); + + prover +} + +/// Construct `attempts` tickets envelopes for the next epoch. +/// +/// E.g. 
by passing an optional threshold +pub fn make_tickets(attempts: u8, pair: &AuthorityPair) -> Vec<(TicketId, TicketEnvelope)> { + let prover = make_prover(pair); + (0..attempts) + .into_iter() + .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) + .collect() +} diff --git a/substrate/frame/sassafras/src/tests.rs b/substrate/frame/sassafras/src/tests.rs index ec3425cce7bf..68c981768414 100644 --- a/substrate/frame/sassafras/src/tests.rs +++ b/substrate/frame/sassafras/src/tests.rs @@ -21,20 +21,123 @@ use crate::*; use mock::*; use sp_consensus_sassafras::Slot; +use sp_runtime::DispatchError; -fn h2b(hex: &str) -> [u8; N] { - array_bytes::hex2array_unchecked(hex) +const TICKETS_FILE: &str = "src/data/tickets.bin"; + +const GENESIS_SLOT: u64 = 100; + +fn h2b(hex: &str) -> Vec { + array_bytes::hex2bytes_unchecked(hex) +} + +fn b2h(bytes: &[u8]) -> String { + array_bytes::bytes2hex("", bytes) } -fn b2h(bytes: [u8; N]) -> String { - array_bytes::bytes2hex("", &bytes) +macro_rules! prefix_eq { + ($a:expr, $b:expr) => {{ + let len = $a.len().min($b.len()); + if &$a[..len] != &$b[..len] { + panic!("left: {}, right: {}", b2h(&$a[..len]), b2h(&$b[..len])); + } + }}; } #[test] -fn genesis_values_assumptions_check() { +fn assumptions_check() { + let mut tickets = make_ticket_bodies(100, None, false); + + // Check that the returned tickets are not sorted to start with. + assert!(tickets.windows(2).any(|w| w[0] > w[1])); + new_test_ext(3).execute_with(|| { assert_eq!(Sassafras::authorities().len(), 3); - assert_eq!(Sassafras::config(), TEST_EPOCH_CONFIGURATION); + + // Check that entries are stored sorted (bigger first) + tickets + .iter() + .for_each(|t| TicketsAccumulator::::insert(TicketKey::from(t.id), t)); + assert_eq!(TicketsAccumulator::::count(), 100); + tickets.sort_unstable_by_key(|t| TicketKey::from(t.id)); + let accumulator: Vec<_> = TicketsAccumulator::::iter_values().collect(); + assert_eq!(tickets, accumulator); + + // Check accumulator clear + let _ = TicketsAccumulator::::clear(u32::MAX, None); + assert_eq!(TicketsAccumulator::::count(), 0); + }); +} + +#[test] +fn deposit_tickets_works() { + let mut tickets = make_ticket_bodies(15, None, false); + + new_test_ext(1).execute_with(|| { + // Try to append an unsorted chunk + let mut candidates = tickets[..5].to_vec(); + let err = Sassafras::deposit_tickets(candidates).unwrap_err(); + assert!(matches!(err, Error::TicketBadOrder)); + let _ = TicketsAccumulator::::clear(u32::MAX, None); + + // Correctly append the first sorted chunk + let mut candidates = tickets[..5].to_vec(); + candidates.sort_unstable(); + Sassafras::deposit_tickets(candidates).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 5); + // Note: internally the tickets are stored in reverse order (bigger first) + let stored: Vec<_> = TicketsAccumulator::::iter_values().collect(); + let mut expected = tickets[..5].to_vec(); + expected.sort_unstable_by_key(|t| TicketKey::from(t.id)); + assert_eq!(expected, stored); + + // Try to append a chunk with a ticket already pushed + let mut candidates = tickets[4..10].to_vec(); + candidates.sort_unstable(); + let err = Sassafras::deposit_tickets(candidates).unwrap_err(); + assert!(matches!(err, Error::TicketDuplicate)); + // Restore last correct state + let _ = TicketsAccumulator::::clear(u32::MAX, None); + let mut candidates = tickets[..5].to_vec(); + candidates.sort_unstable(); + Sassafras::deposit_tickets(candidates).unwrap(); + + // Correctly push the second sorted chunk + let mut candidates = tickets[5..10].to_vec(); 
+ candidates.sort_unstable(); + Sassafras::deposit_tickets(candidates).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 10); + // Note: internally the tickets are stored in reverse order (bigger first) + let mut stored: Vec<_> = TicketsAccumulator::::iter_values().collect(); + let mut expected = tickets[..10].to_vec(); + expected.sort_unstable_by_key(|t| TicketKey::from(t.id)); + assert_eq!(expected, stored); + + // Now the buffer is full, pick only the tickets that will eventually fit. + let mut candidates = tickets[10..].to_vec(); + candidates.sort_unstable(); + let mut eligible = Vec::new(); + for candidate in candidates { + if stored.is_empty() { + break + } + let bigger = stored.remove(0); + if bigger.id <= candidate.id { + break + } + eligible.push(candidate); + } + candidates = eligible; + + // Correctly push the last candidates chunk + Sassafras::deposit_tickets(candidates).unwrap(); + + assert_eq!(TicketsAccumulator::::count(), 10); + // Note: internally the tickets are stored in reverse order (bigger first) + let mut stored: Vec<_> = TicketsAccumulator::::iter_values().collect(); + tickets.sort_unstable_by_key(|t| TicketKey::from(t.id)); + + assert_eq!(tickets[5..], stored); }); } @@ -42,244 +145,80 @@ fn genesis_values_assumptions_check() { fn post_genesis_randomness_initialization() { let (pairs, mut ext) = new_test_ext_with_pairs(1, false); let pair = &pairs[0]; + let first_slot = (GENESIS_SLOT + 1).into(); ext.execute_with(|| { - assert_eq!(Sassafras::randomness(), [0; 32]); - assert_eq!(Sassafras::next_randomness(), [0; 32]); - assert_eq!(Sassafras::randomness_accumulator(), [0; 32]); + let genesis_randomness = Sassafras::randomness_buf(); + assert_eq!(genesis_randomness, RandomnessBuffer::default()); // Test the values with a zero genesis block hash - let _ = initialize_block(1, 123.into(), [0x00; 32].into(), pair); - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("b9497550deeeb4adc134555930de61968a0558f8947041eb515b2f5fa68ffaf7") - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("febcc7fe9539fe17ed29f525831394edfb30b301755dc9bd91584a1f065faf87") - ); - let (id1, _) = make_ticket_bodies(1, Some(pair))[0]; + let _ = initialize_block(1, first_slot, [0x00; 32].into(), pair); - // Reset what is relevant - NextRandomness::::set([0; 32]); - RandomnessAccumulator::::set([0; 32]); + let randomness = Sassafras::randomness_buf(); + prefix_eq!(randomness[0], h2b("89eb0d6a")); + prefix_eq!(randomness[1], h2b("4e8c71d2")); + prefix_eq!(randomness[2], h2b("3a4c0005")); + prefix_eq!(randomness[3], h2b("0dd43c54")); - // Test the values with a non-zero genesis block hash - let _ = initialize_block(1, 123.into(), [0xff; 32].into(), pair); - - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("51c1e3b3a73d2043b3cabae98ff27bdd4aad8967c21ecda7b9465afaa0e70f37") - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("466bf3007f2e17bffee0b3c42c90f33d654f5ff61eff28b0cc650825960abd52") - ); - let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; - - // Ticket ids should be different when next epoch randomness is different - assert_ne!(id1, id2); + let ticket1 = make_ticket_body(0, pair); // Reset what is relevant - 
NextRandomness::::set([0; 32]); - RandomnessAccumulator::::set([0; 32]); + RandomnessBuf::::set(genesis_randomness); // Test the values with a non-zero genesis block hash - let _ = initialize_block(1, 321.into(), [0x00; 32].into(), pair); - - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("d85d84a54f79453000eb62e8a17b30149bd728d3232bc2787a89d51dc9a36008") - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("8a035eed02b5b8642b1515ed19752df8df156627aea45c4ef6e3efa88be9a74d") - ); - let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; - - // Ticket ids should be different when next epoch randomness is different - assert_ne!(id1, id2); - }); -} - -// Tests if the sorted tickets are assigned to each slot outside-in. -#[test] -fn slot_ticket_id_outside_in_fetch() { - let genesis_slot = Slot::from(100); - let tickets_count = 6; - - // Current epoch tickets - let curr_tickets: Vec = (0..tickets_count).map(|i| i as TicketId).collect(); - - // Next epoch tickets - let next_tickets: Vec = - (0..tickets_count - 1).map(|i| (i + tickets_count) as TicketId).collect(); - new_test_ext(0).execute_with(|| { - // Some corner cases - TicketsIds::::insert((0, 0_u32), 1_u128); - - // Cleanup - (0..3).for_each(|i| TicketsIds::::remove((0, i as u32))); - - curr_tickets - .iter() - .enumerate() - .for_each(|(i, id)| TicketsIds::::insert((0, i as u32), id)); - - next_tickets - .iter() - .enumerate() - .for_each(|(i, id)| TicketsIds::::insert((1, i as u32), id)); - - TicketsMeta::::set(TicketsMetadata { - tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32], - unsorted_tickets_count: 0, - }); + let _ = initialize_block(1, first_slot, [0xff; 32].into(), pair); - // Before importing the first block the pallet always return `None` - // This is a kind of special hardcoded case that should never happen in practice - // as the first thing the pallet does is to initialize the genesis slot. + let randomness = Sassafras::randomness_buf(); + prefix_eq!(randomness[0], h2b("e2021160")); + prefix_eq!(randomness[1], h2b("3b0c0905")); + prefix_eq!(randomness[2], h2b("632ac0d9")); + prefix_eq!(randomness[3], h2b("575088c3")); - assert_eq!(Sassafras::slot_ticket_id(0.into()), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 100), None); - - // Initialize genesis slot.. - GenesisSlot::::set(genesis_slot); - frame_system::Pallet::::set_block_number(One::one()); - - // Try to fetch a ticket for a slot before current epoch. - assert_eq!(Sassafras::slot_ticket_id(0.into()), None); - - // Current epoch tickets. 
- assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[3])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 2), Some(curr_tickets[5])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 3), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 4), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 5), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 6), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 7), Some(curr_tickets[4])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 8), Some(curr_tickets[2])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 9), Some(curr_tickets[0])); - - // Next epoch tickets (note that only 5 tickets are available) - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 10), Some(next_tickets[1])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 11), Some(next_tickets[3])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 12), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 13), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 14), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 15), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 16), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 17), Some(next_tickets[4])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 18), Some(next_tickets[2])); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 19), Some(next_tickets[0])); - - // Try to fetch the tickets for slots beyond the next epoch. - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 20), None); - assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 42), None); - }); -} - -// Different test for outside-in test with more focus on corner case correctness. 
-#[test] -fn slot_ticket_id_outside_in_fetch_corner_cases() { - new_test_ext(0).execute_with(|| { - frame_system::Pallet::::set_block_number(One::one()); + let ticket2 = make_ticket_body(0, pair); - let mut meta = TicketsMetadata { tickets_count: [0, 0], unsorted_tickets_count: 0 }; - let curr_epoch_idx = EpochIndex::::get(); - - let mut epoch_test = |epoch_idx| { - let tag = (epoch_idx & 1) as u8; - let epoch_start = Sassafras::epoch_start(epoch_idx); - - // cleanup - meta.tickets_count = [0, 0]; - TicketsMeta::::set(meta); - assert!((0..10).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - - meta.tickets_count[tag as usize] += 1; - TicketsMeta::::set(meta); - TicketsIds::::insert((tag, 0_u32), 1_u128); - assert_eq!(Sassafras::slot_ticket_id((epoch_start + 9).into()), Some(1_u128)); - assert!((0..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - - meta.tickets_count[tag as usize] += 1; - TicketsMeta::::set(meta); - TicketsIds::::insert((tag, 1_u32), 2_u128); - assert_eq!(Sassafras::slot_ticket_id((epoch_start + 0).into()), Some(2_u128)); - assert!((1..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - - meta.tickets_count[tag as usize] += 2; - TicketsMeta::::set(meta); - TicketsIds::::insert((tag, 2_u32), 3_u128); - assert_eq!(Sassafras::slot_ticket_id((epoch_start + 8).into()), Some(3_u128)); - assert!((1..8).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); - }; - - // Even epoch - epoch_test(curr_epoch_idx); - epoch_test(curr_epoch_idx + 1); + // Ticket ids should be different when next epoch randomness is different + assert_ne!(ticket1.id, ticket2.id); }); } #[test] -fn on_first_block_after_genesis() { +fn on_first_block() { let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let start_slot = (GENESIS_SLOT + 1).into(); + let start_block = 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - - let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - - let common_assertions = || { - assert_eq!(Sassafras::genesis_slot(), start_slot); + let common_assertions = |initialized| { assert_eq!(Sassafras::current_slot(), start_slot); - assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_index(), 0); - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") - ); + assert_eq!(Sassafras::current_slot_index(), 1); + assert_eq!(TemporaryData::::exists(), initialized); }; // Post-initialization status - assert!(ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("f0d42f6b7c0d157ecbd788be44847b80a96c290c04b5dfa5d1d40c98aa0c04ed") - ); + assert_eq!(Sassafras::randomness_buf(), RandomnessBuffer::default()); - let header = finalize_block(start_block); + let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - // Post-finalization status + common_assertions(true); + let post_init_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_init_randomness[0], h2b("89eb0d6a")); + prefix_eq!(post_init_randomness[1], h2b("4e8c71d2")); + prefix_eq!(post_init_randomness[2], h2b("3a4c0005")); + prefix_eq!(post_init_randomness[3], 
h2b("0dd43c54")); - assert!(!ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"), - ); + // // Post-finalization status + + let header = finalize_block(start_block); + + common_assertions(false); + let post_fini_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_fini_randomness[0], h2b("334d1a4c")); + prefix_eq!(post_fini_randomness[1], post_init_randomness[1]); + prefix_eq!(post_fini_randomness[2], post_init_randomness[2]); + prefix_eq!(post_fini_randomness[3], post_init_randomness[3]); // Header data check @@ -289,9 +228,8 @@ fn on_first_block_after_genesis() { // Genesis epoch start deposits consensus let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( sp_consensus_sassafras::digests::NextEpochDescriptor { - authorities: Sassafras::next_authorities().into_inner(), randomness: Sassafras::next_randomness(), - config: None, + authorities: Sassafras::next_authorities().into_inner(), }, ); let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); @@ -302,7 +240,7 @@ fn on_first_block_after_genesis() { #[test] fn on_normal_block() { let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let start_slot = Slot::from(100); + let start_slot = (GENESIS_SLOT + 1).into(); let start_block = 1; let end_block = start_block + 1; @@ -310,46 +248,37 @@ fn on_normal_block() { initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // We don't want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - assert!(epoch_length > end_block); + let epoch_duration = Sassafras::epoch_duration() as u64; + assert!(epoch_duration > end_block); // Progress to block 2 let digest = progress_to_block(end_block, &pairs[0]).unwrap(); - let common_assertions = || { - assert_eq!(Sassafras::genesis_slot(), start_slot); + let common_assertions = |initialized| { assert_eq!(Sassafras::current_slot(), start_slot + 1); - assert_eq!(Sassafras::epoch_index(), 0); - assert_eq!(Sassafras::current_epoch_start(), start_slot); - assert_eq!(Sassafras::current_slot_index(), 1); - assert_eq!(Sassafras::randomness(), [0; 32]); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") - ); + assert_eq!(Sassafras::current_slot_index(), 2); + assert_eq!(TemporaryData::::exists(), initialized); }; // Post-initialization status - assert!(ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"), - ); + common_assertions(true); + let post_init_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_init_randomness[0], h2b("334d1a4c")); + prefix_eq!(post_init_randomness[1], h2b("4e8c71d2")); + prefix_eq!(post_init_randomness[2], h2b("3a4c0005")); + prefix_eq!(post_init_randomness[3], h2b("0dd43c54")); let header = finalize_block(end_block); // Post-finalization status - assert!(!ClaimTemporaryData::::exists()); - common_assertions(); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("be9261adb9686dfd3f23f8a276b7acc7f4beb3137070beb64c282ac22d84cbf0"), - ); + common_assertions(false); + let 
post_fini_randomness = Sassafras::randomness_buf(); + prefix_eq!(post_fini_randomness[0], h2b("277138ab")); + prefix_eq!(post_fini_randomness[1], post_init_randomness[1]); + prefix_eq!(post_fini_randomness[2], post_init_randomness[2]); + prefix_eq!(post_fini_randomness[3], post_init_randomness[3]); // Header data check @@ -359,65 +288,35 @@ fn on_normal_block() { } #[test] -fn produce_epoch_change_digest_no_config() { +fn produce_epoch_change_digest() { let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let start_slot = (GENESIS_SLOT + 1).into(); + let start_block = 1; ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - initialize_block(start_block, start_slot, Default::default(), &pairs[0]); // We want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - let end_block = start_block + epoch_length; - - let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + let epoch_duration = Sassafras::epoch_duration() as u64; + let end_block = start_block + epoch_duration - 1; - let common_assertions = || { - assert_eq!(Sassafras::genesis_slot(), start_slot); - assert_eq!(Sassafras::current_slot(), start_slot + epoch_length); - assert_eq!(Sassafras::epoch_index(), 1); - assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_length); + let common_assertions = |initialized| { + assert_eq!(Sassafras::current_slot(), GENESIS_SLOT + epoch_duration); assert_eq!(Sassafras::current_slot_index(), 0); - println!("[DEBUG] {}", b2h(Sassafras::randomness())); - assert_eq!( - Sassafras::randomness(), - h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") - ); + assert_eq!(TemporaryData::::exists(), initialized); }; + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + // Post-initialization status - assert!(ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"), - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("bf0f1228f4ff953c8c1bda2cceb668bf86ea05d7ae93e26d021c9690995d5279"), - ); + common_assertions(true); let header = finalize_block(end_block); // Post-finalization status - assert!(!ClaimTemporaryData::::exists()); - common_assertions(); - println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); - assert_eq!( - Sassafras::next_randomness(), - h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"), - ); - println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); - assert_eq!( - Sassafras::randomness_accumulator(), - h2b("8a1ceb346036c386d021264b10912c8b656799668004c4a487222462b394cd89"), - ); + common_assertions(false); // Header data check @@ -428,7 +327,6 @@ fn produce_epoch_change_digest_no_config() { sp_consensus_sassafras::digests::NextEpochDescriptor { authorities: Sassafras::next_authorities().into_inner(), randomness: Sassafras::next_randomness(), - config: None, }, ); let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); @@ -436,366 +334,307 @@ fn produce_epoch_change_digest_no_config() { }) } -#[test] -fn produce_epoch_change_digest_with_config() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - - ext.execute_with(|| { - let start_slot = Slot::from(100); - let start_block = 1; - - initialize_block(start_block, start_slot, 
Default::default(), &pairs[0]); +// Tests if the sorted tickets are assigned to each slot outside-in. +fn slot_ticket_id_outside_in_fetch(jit_accumulator_drain: bool) { + let genesis_slot = Slot::from(GENESIS_SLOT); + let curr_count = 8; + let next_count = 6; + let tickets = make_ticket_bodies(curr_count + next_count, None, false); + + // Current epoch tickets (incrementally sorted as expected by the protocol) + let mut curr_tickets = tickets[..curr_count as usize].to_vec(); + curr_tickets.sort_unstable(); + // Next epoch tickets (incrementally sorted as expected by the protocol) + let mut next_tickets = tickets[curr_count as usize..].to_vec(); + next_tickets.sort_unstable(); - let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; - Sassafras::plan_config_change(RuntimeOrigin::root(), config).unwrap(); + new_test_ext(0).execute_with(|| { + // Store current epoch tickets in place. + curr_tickets + .iter() + .enumerate() + .for_each(|(i, t)| Tickets::::insert((0, i as u32), t)); + + if jit_accumulator_drain { + // Store next epoch tickets in the accumulator (to test the JIT sorting logic as well) + next_tickets + .iter() + .for_each(|t| TicketsAccumulator::::insert(TicketKey::from(t.id), t)); + TicketsCount::::set([curr_count as u32, 0]); + } else { + // Directly store in the tickets buffer + next_tickets + .iter() + .enumerate() + .for_each(|(i, t)| Tickets::::insert((1, i as u32), t)); + TicketsCount::::set([curr_count as u32, next_count as u32]); + } + + CurrentSlot::::set(genesis_slot); + + // Before importing the first block (on frame System pallet) `slot_ticket` always + // returns `None`. This is a kind of special hardcoded case that should never happen + // in practice as the first thing the pallet does is to initialize the genesis slot. + + assert_eq!(Sassafras::slot_ticket(0.into()), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 0), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 1), None); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 100), None); + + // Manually set block number to simulate that frame system initialize has been + // called for the first block. + frame_system::Pallet::::set_block_number(One::one()); - // We want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - let end_block = start_block + epoch_length; + // Try to fetch a ticket for a slot before current epoch. + assert_eq!(Sassafras::slot_ticket(0.into()), None); - let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + // Current epoch tickets. + assert_eq!(Sassafras::slot_ticket(genesis_slot + 0).unwrap(), curr_tickets[0]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 1).unwrap(), curr_tickets[7]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 2).unwrap(), curr_tickets[1]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 3).unwrap(), curr_tickets[6]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 4).unwrap(), curr_tickets[2]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 5).unwrap(), curr_tickets[5]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 6).unwrap(), curr_tickets[3]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 7).unwrap(), curr_tickets[4]); + assert!(Sassafras::slot_ticket(genesis_slot + 8).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 9).is_none()); + + // Next epoch tickets. 
+ assert_eq!(Sassafras::slot_ticket(genesis_slot + 10).unwrap(), next_tickets[0]); + if jit_accumulator_drain { + // After first fetch tickets are moved to the buffer + assert_eq!(TicketsCount::::get()[1], 6); + } + assert_eq!(Sassafras::slot_ticket(genesis_slot + 11).unwrap(), next_tickets[5]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 12).unwrap(), next_tickets[1]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 13).unwrap(), next_tickets[4]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 14).unwrap(), next_tickets[2]); + assert_eq!(Sassafras::slot_ticket(genesis_slot + 15).unwrap(), next_tickets[3]); + assert!(Sassafras::slot_ticket(genesis_slot + 16).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 17).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 18).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 19).is_none()); - let header = finalize_block(end_block); + // Try to fetch the tickets for slots beyond the next epoch. + assert!(Sassafras::slot_ticket(genesis_slot + 20).is_none()); + assert!(Sassafras::slot_ticket(genesis_slot + 42).is_none()); + }); +} - // Header data check. - // Skip pallet status checks that were already performed by other tests. +#[test] +fn slot_ticket_id_outside_in_fetch_jit_accumulator_drain() { + slot_ticket_id_outside_in_fetch(true); +} - assert_eq!(header.digest.logs.len(), 2); - assert_eq!(header.digest.logs[0], digest.logs[0]); - // Deposits consensus log on epoch change - let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( - sp_consensus_sassafras::digests::NextEpochDescriptor { - authorities: Sassafras::next_authorities().into_inner(), - randomness: Sassafras::next_randomness(), - config: Some(config), - }, - ); - let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); - assert_eq!(header.digest.logs[1], consensus_digest) - }) +#[test] +fn slot_ticket_id_outside_in_fetch_no_jit_accumulator_drain() { + slot_ticket_id_outside_in_fetch(false); } #[test] -fn segments_incremental_sort_works() { - let (pairs, mut ext) = new_test_ext_with_pairs(1, false); - let pair = &pairs[0]; - let segments_count = 14; - let start_slot = Slot::from(100); +fn slot_and_epoch_helpers_works() { let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); ext.execute_with(|| { - let epoch_length = Sassafras::epoch_length() as u64; - // -3 just to have the last segment not full... 
- let submitted_tickets_count = segments_count * SEGMENT_MAX_SIZE - 3; + let epoch_duration = Sassafras::epoch_duration() as u64; + assert_eq!(epoch_duration, 10); + + let check = |slot, slot_idx, epoch_slot, epoch_idx| { + assert_eq!(Sassafras::current_slot(), Slot::from(slot)); + assert_eq!(Sassafras::current_slot_index(), slot_idx); + assert_eq!(Sassafras::current_epoch_start(), Slot::from(epoch_slot)); + assert_eq!(Sassafras::current_epoch_index(), epoch_idx); + }; - initialize_block(start_block, start_slot, Default::default(), pair); + // Post genesis state (before first initialization of epoch N) + check(0, 0, 0, 0); - // Manually populate the segments to skip the threshold check - let mut tickets = make_ticket_bodies(submitted_tickets_count, None); - persist_next_epoch_tickets_as_segments(&tickets); - - // Proceed to half of the epoch (sortition should not have been started yet) - let half_epoch_block = start_block + epoch_length / 2; - progress_to_block(half_epoch_block, pair); - - let mut unsorted_tickets_count = submitted_tickets_count; - - // Check that next epoch tickets sortition is not started yet - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - // Follow the incremental sortition block by block - - progress_to_block(half_epoch_block + 1, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE - 3; - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count,); - assert_eq!(meta.tickets_count, [0, 0]); - - progress_to_block(half_epoch_block + 2, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - progress_to_block(half_epoch_block + 3, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - progress_to_block(half_epoch_block + 4, pair); - unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, 0]); - - let header = finalize_block(half_epoch_block + 4); - - // Sort should be finished now. - // Check that next epoch tickets count have the correct value. - // Bigger ticket ids were discarded during sortition. 
- unsorted_tickets_count -= 2 * SEGMENT_MAX_SIZE; - assert_eq!(unsorted_tickets_count, 0); - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); - assert_eq!(meta.tickets_count, [0, epoch_length as u32]); - // Epoch change log should have been pushed as well - assert_eq!(header.digest.logs.len(), 1); - // No tickets for the current epoch - assert_eq!(TicketsIds::::get((0, 0)), None); - - // Check persistence of "winning" tickets - tickets.sort_by_key(|t| t.0); - (0..epoch_length as usize).into_iter().for_each(|i| { - let id = TicketsIds::::get((1, i as u32)).unwrap(); - let body = TicketsData::::get(id).unwrap(); - assert_eq!((id, body), tickets[i]); - }); - // Check removal of "loosing" tickets - (epoch_length as usize..tickets.len()).into_iter().for_each(|i| { - assert!(TicketsIds::::get((1, i as u32)).is_none()); - assert!(TicketsData::::get(tickets[i].0).is_none()); - }); + // Epoch N first block + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + check(101, 1, 100, 10); - // The next block will be the first produced on the new epoch. - // At this point the tickets are found already sorted and ready to be used. - let slot = Sassafras::current_slot() + 1; - let number = System::block_number() + 1; - initialize_block(number, slot, header.hash(), pair); - let header = finalize_block(number); - // Epoch changes digest is also produced - assert_eq!(header.digest.logs.len(), 2); - }); -} + // Progress to epoch N last block + let end_block = start_block + epoch_duration - 2; + progress_to_block(end_block, &pairs[0]).unwrap(); + check(109, 9, 100, 10); -#[test] -fn tickets_fetch_works_after_epoch_change() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let pair = &pairs[0]; - let start_slot = Slot::from(100); - let start_block = 1; - let submitted_tickets = 300; + // Progress to epoch N+1 first block + progress_to_block(end_block + 1, &pairs[0]).unwrap(); + check(110, 0, 110, 11); - ext.execute_with(|| { - initialize_block(start_block, start_slot, Default::default(), pair); + // Progress to epoch N+1 last block + let end_block = end_block + epoch_duration; + progress_to_block(end_block, &pairs[0]).unwrap(); + check(119, 9, 110, 11); - // We don't want to trigger an epoch change in this test. - let epoch_length = Sassafras::epoch_length() as u64; - assert!(epoch_length > 2); - progress_to_block(2, &pairs[0]).unwrap(); - - // Persist tickets as three different segments. - let tickets = make_ticket_bodies(submitted_tickets, None); - persist_next_epoch_tickets_as_segments(&tickets); - - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, submitted_tickets); - assert_eq!(meta.tickets_count, [0, 0]); - - // Progress up to the last epoch slot (do not enact epoch change) - progress_to_block(epoch_length, &pairs[0]).unwrap(); - - // At this point next epoch tickets should have been sorted and ready to be used - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, 0); - assert_eq!(meta.tickets_count, [0, epoch_length as u32]); - - // Compute and sort the tickets ids (aka tickets scores) - let mut expected_ids: Vec<_> = tickets.into_iter().map(|(id, _)| id).collect(); - expected_ids.sort(); - expected_ids.truncate(epoch_length as usize); - - // Check if we can fetch next epoch tickets ids (outside-in).
- let slot = Sassafras::current_slot(); - assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]); - assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]); - assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[5]); - assert_eq!(Sassafras::slot_ticket_id(slot + 4).unwrap(), expected_ids[7]); - assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[6]); - assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[4]); - assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[2]); - assert_eq!(Sassafras::slot_ticket_id(slot + 10).unwrap(), expected_ids[0]); - assert!(Sassafras::slot_ticket_id(slot + 11).is_none()); - - // Enact epoch change by progressing one more block - - progress_to_block(epoch_length + 1, &pairs[0]).unwrap(); - - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, 0); - assert_eq!(meta.tickets_count, [0, 10]); - - // Check if we can fetch current epoch tickets ids (outside-in). - let slot = Sassafras::current_slot(); - assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]); - assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]); - assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[5]); - assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[7]); - assert_eq!(Sassafras::slot_ticket_id(slot + 6).unwrap(), expected_ids[6]); - assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[4]); - assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[2]); - assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[0]); - assert!(Sassafras::slot_ticket_id(slot + 10).is_none()); - - // Enact another epoch change, for which we don't have any ticket - progress_to_block(2 * epoch_length + 1, &pairs[0]).unwrap(); - let meta = TicketsMeta::::get(); - assert_eq!(meta.unsorted_tickets_count, 0); - assert_eq!(meta.tickets_count, [0, 0]); - }); + // Progress to epoch N+2 first block + progress_to_block(end_block + 1, &pairs[0]).unwrap(); + check(120, 0, 120, 12); + }) } #[test] -fn block_allowed_to_skip_epochs() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let pair = &pairs[0]; - let start_slot = Slot::from(100); +fn tickets_accumulator_works() { let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + let e1_count = 6; + let e2_count = 10; + let tickets = make_ticket_bodies(e1_count + e2_count, None, false); + let e1_tickets = tickets[..e1_count as usize].to_vec(); + let e2_tickets = tickets[e1_count as usize..].to_vec(); + + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); ext.execute_with(|| { - let epoch_length = Sassafras::epoch_length() as u64; + let epoch_duration = Sassafras::epoch_duration() as u64; - initialize_block(start_block, start_slot, Default::default(), pair); + let epoch_idx = Sassafras::current_epoch_index(); + let epoch_tag = (epoch_idx % 2) as u8; + let next_epoch_tag = epoch_tag ^ 1; - let tickets = make_ticket_bodies(3, Some(pair)); - persist_next_epoch_tickets(&tickets); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); - let next_random = Sassafras::next_randomness(); + // Append some tickets to the accumulator + e1_tickets + .iter() + .for_each(|t| TicketsAccumulator::::insert(TicketKey::from(t.id), t)); - // We want to skip 3 epochs in this test.
- let offset = 4 * epoch_length; - go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + // Progress to epoch's last block + let end_block = start_block + epoch_duration - 2; + progress_to_block(end_block, &pairs[0]).unwrap(); - // Post-initialization status + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[epoch_tag as usize], 0); + assert!( + 0 < tickets_count[next_epoch_tag as usize] && + tickets_count[next_epoch_tag as usize] < e1_count as u32 + ); - assert!(ClaimTemporaryData::::exists()); - assert_eq!(Sassafras::genesis_slot(), start_slot); - assert_eq!(Sassafras::current_slot(), start_slot + offset); - assert_eq!(Sassafras::epoch_index(), 4); - assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); - assert_eq!(Sassafras::current_slot_index(), 0); + finalize_block(end_block); - // Tickets data has been discarded - assert_eq!(TicketsMeta::::get(), TicketsMetadata::default()); - assert!(tickets.iter().all(|(id, _)| TicketsData::::get(id).is_none())); - assert_eq!(SortedCandidates::::get().len(), 0); + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[epoch_tag as usize], 0); + assert_eq!(tickets_count[next_epoch_tag as usize], e1_count as u32); - // We used the last known next epoch randomness as a fallback - assert_eq!(next_random, Sassafras::randomness()); - }); -} + // Start new epoch -#[test] -fn obsolete_tickets_are_removed_on_epoch_change() { - let (pairs, mut ext) = new_test_ext_with_pairs(4, false); - let pair = &pairs[0]; - let start_slot = Slot::from(100); - let start_block = 1; + initialize_block( + end_block + 1, + Sassafras::current_slot() + 1, + Default::default(), + &pairs[0], + ); - ext.execute_with(|| { - let epoch_length = Sassafras::epoch_length() as u64; + let next_epoch_tag = epoch_tag; + let epoch_tag = epoch_tag ^ 1; + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[epoch_tag as usize], e1_count as u32); + assert_eq!(tickets_count[next_epoch_tag as usize], 0); - initialize_block(start_block, start_slot, Default::default(), pair); + // Append some tickets to the accumulator + e2_tickets + .iter() + .for_each(|t| TicketsAccumulator::::insert(TicketKey::from(t.id), t)); - let tickets = make_ticket_bodies(10, Some(pair)); - let mut epoch1_tickets = tickets[..4].to_vec(); - let mut epoch2_tickets = tickets[4..].to_vec(); - - // Persist some tickets for next epoch (N) - persist_next_epoch_tickets(&epoch1_tickets); - assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); - // Check next epoch tickets presence - epoch1_tickets.sort_by_key(|t| t.0); - (0..epoch1_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::::get((1, i as u32)).unwrap(); - let body = TicketsData::::get(id).unwrap(); - assert_eq!((id, body), epoch1_tickets[i]); - }); + // Progress to epoch's last block + let end_block = end_block + epoch_duration; + progress_to_block(end_block, &pairs[0]).unwrap(); - // Advance one epoch to enact the tickets - go_to_block(start_block + epoch_length, start_slot + epoch_length, pair); - assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); - - // Persist some tickets for next epoch (N+1) - persist_next_epoch_tickets(&epoch2_tickets); - assert_eq!(TicketsMeta::::get().tickets_count, [6, 4]); - epoch2_tickets.sort_by_key(|t| t.0); - // Check for this epoch and next epoch tickets presence - (0..epoch1_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::::get((1, i as u32)).unwrap(); - let body = TicketsData::::get(id).unwrap(); - assert_eq!((id, body), 
epoch1_tickets[i]); - }); - (0..epoch2_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::::get((0, i as u32)).unwrap(); - let body = TicketsData::::get(id).unwrap(); - assert_eq!((id, body), epoch2_tickets[i]); - }); + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[epoch_tag as usize], e1_count as u32); + assert!( + 0 < tickets_count[next_epoch_tag as usize] && + tickets_count[next_epoch_tag as usize] < e2_count as u32 + ); - // Advance to epoch 2 and check for cleanup + finalize_block(end_block); - go_to_block(start_block + 2 * epoch_length, start_slot + 2 * epoch_length, pair); - assert_eq!(TicketsMeta::::get().tickets_count, [6, 0]); + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[epoch_tag as usize], e1_count as u32); + assert_eq!(tickets_count[next_epoch_tag as usize], e2_count as u32); - (0..epoch1_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::::get((1, i as u32)).unwrap(); - assert!(TicketsData::::get(id).is_none()); - }); - (0..epoch2_tickets.len()).into_iter().for_each(|i| { - let id = TicketsIds::::get((0, i as u32)).unwrap(); - let body = TicketsData::::get(id).unwrap(); - assert_eq!((id, body), epoch2_tickets[i]); - }); - }) + // Start new epoch + initialize_block( + end_block + 1, + Sassafras::current_slot() + 1, + Default::default(), + &pairs[0], + ); + + let next_epoch_tag = epoch_tag; + let epoch_tag = epoch_tag ^ 1; + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[epoch_tag as usize], e2_count as u32); + assert_eq!(tickets_count[next_epoch_tag as usize], 0); + }); } -const TICKETS_FILE: &str = "src/data/25_tickets_100_auths.bin"; +#[test] +fn incremental_accumulator_drain() { + let tickets = make_ticket_bodies(10, None, false); -fn data_read(filename: &str) -> T { - use std::{fs::File, io::Read}; - let mut file = File::open(filename).unwrap(); - let mut buf = Vec::new(); - file.read_to_end(&mut buf).unwrap(); - T::decode(&mut &buf[..]).unwrap() -} + new_test_ext(0).execute_with(|| { + tickets + .iter() + .for_each(|t| TicketsAccumulator::::insert(TicketKey::from(t.id), t)); -fn data_write(filename: &str, data: T) { - use std::{fs::File, io::Write}; - let mut file = File::create(filename).unwrap(); - let buf = data.encode(); - file.write_all(&buf).unwrap(); -} + let accumulator: Vec<_> = TicketsAccumulator::::iter_values().collect(); + // Assess accumulator expected order (bigger id first) + assert!(accumulator.windows(2).all(|chunk| chunk[0].id > chunk[1].id)); + + let mut onchain_expected = accumulator.clone(); + onchain_expected.sort_unstable(); + + Sassafras::consume_tickets_accumulator(5, 0); + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[0], 5); + assert_eq!(tickets_count[1], 0); -// We don't want to implement anything secure here. -// Just a trivial shuffle for the tests. 
-fn trivial_fisher_yates_shuffle(vector: &mut Vec, random_seed: u64) { - let mut rng = random_seed as usize; - for i in (1..vector.len()).rev() { - let j = rng % (i + 1); - vector.swap(i, j); - rng = (rng.wrapping_mul(6364793005) + 1) as usize; // Some random number generation - } + accumulator.iter().rev().enumerate().skip(5).for_each(|(i, t)| { + let t2 = Tickets::::get((0, i as u32)).unwrap(); + assert_eq!(t.id, t2.id); + }); + + Sassafras::consume_tickets_accumulator(3, 0); + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[0], 8); + assert_eq!(tickets_count[1], 0); + accumulator.iter().rev().enumerate().skip(2).for_each(|(i, t)| { + let t2 = Tickets::::get((0, i as u32)).unwrap(); + assert_eq!(t.id, t2.id); + }); + + Sassafras::consume_tickets_accumulator(5, 0); + let tickets_count = TicketsCount::::get(); + assert_eq!(tickets_count[0], 10); + assert_eq!(tickets_count[1], 0); + accumulator.iter().rev().enumerate().for_each(|(i, t)| { + let t2 = Tickets::::get((0, i as u32)).unwrap(); + assert_eq!(t.id, t2.id); + }); + }); } -// For this test we use a set of pre-constructed tickets from a file. -// Creating a large set of tickets on the fly takes time, and may be annoying -// for test execution. -// -// A valid ring-context is required for this test since we are passing through the -// `submit_ticket` call which tests for ticket validity. #[test] -fn submit_tickets_with_ring_proof_check_works() { +fn submit_tickets_works() { use sp_core::Pair as _; - // env_logger::init(); + let _ = env_logger::try_init(); + let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + + let (randomness, authorities, mut candidates): ( + Randomness, + Vec, + Vec, + ) = data_read(TICKETS_FILE); - let (authorities, mut tickets): (Vec, Vec) = - data_read(TICKETS_FILE); + let config = Sassafras::protocol_config(); // Also checks that duplicates are discarded - tickets.extend(tickets.clone()); - trivial_fisher_yates_shuffle(&mut tickets, 321); let (pairs, mut ext) = new_test_ext_with_pairs(authorities.len(), true); let pair = &pairs[0]; @@ -803,72 +642,123 @@ fn submit_tickets_with_ring_proof_check_works() { assert!(authorities.iter().zip(pairs.iter()).all(|(auth, pair)| auth == &pair.public())); ext.execute_with(|| { - let start_slot = Slot::from(0); - let start_block = 1; - - // Tweak the config to discard ~half of the tickets. 
- let mut config = EpochConfig::::get(); - config.redundancy_factor = 25; - EpochConfig::::set(config); - initialize_block(start_block, start_slot, Default::default(), pair); - NextRandomness::::set([0; 32]); - // Check state before tickets submission - assert_eq!( - TicketsMeta::::get(), - TicketsMetadata { unsorted_tickets_count: 0, tickets_count: [0, 0] }, - ); + // Use the same values as the pre-built tickets + Sassafras::update_ring_verifier(&authorities); + let mut randomness_buf = RandomnessBuf::::get(); + randomness_buf[2] = randomness; + RandomnessBuf::::set(randomness_buf); + NextAuthorities::::set(WeakBoundedVec::force_from(authorities, None)); // Submit the tickets - let max_tickets_per_call = Sassafras::epoch_length() as usize; - tickets.chunks(max_tickets_per_call).for_each(|chunk| { - let chunk = BoundedVec::truncate_from(chunk.to_vec()); - Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap(); - }); + let candidates_per_call = 4; + let mut chunks: Vec<_> = candidates + .chunks(candidates_per_call) + .map(|chunk| BoundedVec::truncate_from(chunk.to_vec())) + .collect(); + assert_eq!(chunks.len(), 5); + + // Try to submit a candidate with an invalid signature. + let mut chunk = chunks[2].clone(); + chunk[0].signature.signature[0] ^= 1; + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::::TicketBadProof)); + assert_eq!(TicketsAccumulator::::count(), 0); + + // Try to submit with an invalid attempt number. + let mut chunk = chunks[2].clone(); + chunk[0].attempt = u8::MAX; + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::::TicketBadAttempt)); + assert_eq!(TicketsAccumulator::::count(), 0); + + // Start submitting from the mid-valued chunk. + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[2].clone()).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 4); + + // Submit a chunk with bigger tickets; there is still space for all the candidates. + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[3].clone()).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 8); + + // Try to submit a duplicate ticket. + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[2].clone()).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::::TicketDuplicate)); + assert_eq!(TicketsAccumulator::::count(), 8); + + // Submit a chunk with smaller tickets. This is accepted (2 old tickets removed). + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[1].clone()).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 10); + + // Try to submit a chunk with bigger tickets. This is discarded. + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[4].clone()).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::::TicketDropped)); + assert_eq!(TicketsAccumulator::::count(), 10); + + // Submit the chunk with the smallest candidates. This is accepted (4 old tickets removed). + Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[0].clone()).unwrap(); + assert_eq!(TicketsAccumulator::::count(), 10); + + // Try to submit a chunk after the contest is over.
+ progress_to_block(start_block + (config.epoch_duration as u64 - 2), &pairs[0]).unwrap(); + let e = Sassafras::submit_tickets(RuntimeOrigin::none(), chunks[0].clone()).unwrap_err(); + assert_eq!(e, DispatchError::from(Error::::TicketUnexpected)); + }) +} - // Check state after submission - assert_eq!( - TicketsMeta::::get(), - TicketsMetadata { unsorted_tickets_count: 16, tickets_count: [0, 0] }, - ); - assert_eq!(UnsortedSegments::::get(0).len(), 16); - assert_eq!(UnsortedSegments::::get(1).len(), 0); +fn data_read(filename: &str) -> T { + use std::{fs::File, io::Read}; + let mut file = File::open(filename).unwrap(); + let mut buf = Vec::new(); + file.read_to_end(&mut buf).unwrap(); + T::decode(&mut &buf[..]).unwrap() +} - finalize_block(start_block); - }) +fn data_write(filename: &str, data: T) { + use std::{fs::File, io::Write}; + let mut file = File::create(filename).unwrap(); + let buf = data.encode(); + file.write_all(&buf).unwrap(); } #[test] -#[ignore = "test tickets data generator"] -fn make_tickets_data() { +#[ignore = "test tickets generator"] +fn generate_test_tickets() { use super::*; use sp_core::crypto::Pair; - // Number of authorities who produces tickets (for the sake of this test) - let tickets_authors_count = 5; + let start_block = 1; + let start_slot = (GENESIS_SLOT + 1).into(); + // Total number of authorities (the ring) - let authorities_count = 100; + let authorities_count = 10; let (pairs, mut ext) = new_test_ext_with_pairs(authorities_count, true); let authorities: Vec<_> = pairs.iter().map(|sk| sk.public()).collect(); + let mut tickets = Vec::new(); ext.execute_with(|| { - let config = EpochConfig::::get(); + let config = Sassafras::protocol_config(); + assert!(authorities_count < config.max_authorities as usize); - let tickets_count = tickets_authors_count * config.attempts_number as usize; - let mut tickets = Vec::with_capacity(tickets_count); + let tickets_count = authorities_count * config.attempts_number as usize; - // Construct pre-built tickets with a well known `NextRandomness` value. - NextRandomness::::set([0; 32]); + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); println!("Constructing {} tickets", tickets_count); - pairs.iter().take(tickets_authors_count).enumerate().for_each(|(i, pair)| { + + pairs.iter().take(authorities_count).enumerate().for_each(|(i, pair)| { let t = make_tickets(config.attempts_number, pair); tickets.extend(t); - println!("{:.2}%", 100f32 * ((i + 1) as f32 / tickets_authors_count as f32)); + println!("{:.2}%", 100f32 * ((i + 1) as f32 / authorities_count as f32)); }); - data_write(TICKETS_FILE, (authorities, tickets)); + tickets.sort_unstable_by_key(|t| t.0); + let envelopes: Vec<_> = tickets.into_iter().map(|t| t.1).collect(); + + // Tickets were generated using `next_randomness` + let randomness = Sassafras::next_randomness(); + + data_write(TICKETS_FILE, (randomness, authorities, envelopes)); }); } diff --git a/substrate/frame/sassafras/src/weights.rs b/substrate/frame/sassafras/src/weights.rs index 32ea2d29a180..41a005b26cc5 100644 --- a/substrate/frame/sassafras/src/weights.rs +++ b/substrate/frame/sassafras/src/weights.rs @@ -17,14 +17,14 @@ //! Autogenerated weights for `pallet_sassafras` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-16, STEPS: `20`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-07-24, STEPS: `20`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `behemoth`, CPU: `AMD Ryzen Threadripper 3970X 32-Core Processor` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/node-template +// ./target/release/solochain-template-node // benchmark // pallet // --chain @@ -55,157 +55,115 @@ pub trait WeightInfo { fn on_initialize() -> Weight; fn enact_epoch_change(x: u32, y: u32, ) -> Weight; fn submit_tickets(x: u32, ) -> Weight; - fn plan_config_change() -> Weight; fn update_ring_verifier(x: u32, ) -> Weight; fn load_ring_context() -> Weight; - fn sort_segments(x: u32, ) -> Weight; } /// Weights for `pallet_sassafras` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: `System::Digest` (r:1 w:1) + /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) - /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:1 w:0) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentSlot` (r:0 w:1) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentSlot` (r:1 w:1) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1) - /// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:0 w:1) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:0) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:0 w:1) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `302` - // Estimated: `4787` - // Minimum execution time: 438_039_000 picoseconds. - Weight::from_parts(439_302_000, 4787) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Measured: `270` + // Estimated: `1755` + // Minimum execution time: 382_223_000 picoseconds. 
+ Weight::from_parts(383_656_000, 1755) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:1) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:1) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:1 w:0) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsCount` (r:1 w:1) + /// Proof: `Sassafras::TicketsCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:1001 w:1000) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Tickets` (r:0 w:1000) + /// Proof: `Sassafras::Tickets` 
(`max_values`: None, `max_size`: Some(168), added: 2643, mode: `MaxEncodedLen`) /// Storage: `Sassafras::Authorities` (r:0 w:1) /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:9896) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochConfig` (r:0 w:1) - /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. - /// The range of component `y` is `[1000, 5000]`. + /// The range of component `y` is `[100, 1000]`. fn enact_epoch_change(x: u32, y: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` - // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` - // Minimum execution time: 121_279_846_000 picoseconds. - Weight::from_parts(94_454_851_972, 593350) - // Standard Error: 24_177_301 - .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) - // Standard Error: 601_053 - .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) + // Measured: `590613 + x * (33 ±0) + y * (68 ±0)` + // Estimated: `592099 + x * (33 ±0) + y * (2670 ±0)` + // Minimum execution time: 142_623_107_000 picoseconds. 
+ Weight::from_parts(135_944_664_003, 592099) + // Standard Error: 3_660_095 + .saturating_add(Weight::from_parts(174_904_510, 0).saturating_mul(x.into())) + // Standard Error: 404_219 + .saturating_add(Weight::from_parts(7_440_688, 0).saturating_mul(y.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) - .saturating_add(T::DbWeight::get().writes(112_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(y.into()))) - .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) - .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(y.into())) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:0) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:25 w:25) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 25]`. + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:16 w:16) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 16]`. 
fn submit_tickets(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3869` - // Estimated: `5519 + x * (2559 ±0)` - // Minimum execution time: 36_904_934_000 picoseconds. - Weight::from_parts(25_822_957_295, 5519) - // Standard Error: 11_047_832 - .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(9_u64)) + // Measured: `1029` + // Estimated: `4787 + x * (2670 ±0)` + // Minimum execution time: 52_363_693_000 picoseconds. + Weight::from_parts(38_029_460_770, 4787) + // Standard Error: 15_839_361 + .saturating_add(Weight::from_parts(14_567_084_979, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) - } - /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn plan_config_change() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_038_000 picoseconds. - Weight::from_parts(4_499_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(x.into())) } /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. fn update_ring_verifier(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 105_121_424_000 picoseconds. - Weight::from_parts(105_527_334_385, 591809) - // Standard Error: 2_933_910 - .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + // Minimum execution time: 135_738_430_000 picoseconds. + Weight::from_parts(135_840_809_672, 591809) + // Standard Error: 3_319_979 + .saturating_add(Weight::from_parts(173_092_727, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -213,180 +171,118 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) fn load_ring_context() -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 44_005_681_000 picoseconds. - Weight::from_parts(44_312_079_000, 591809) + // Minimum execution time: 55_326_215_000 picoseconds. 
+ Weight::from_parts(55_332_809_000, 591809) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:0 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:12600) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 100]`. - fn sort_segments(x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `222 + x * (2060 ±0)` - // Estimated: `4687 + x * (4529 ±0)` - // Minimum execution time: 183_501_000 picoseconds. - Weight::from_parts(183_501_000, 4687) - // Standard Error: 1_426_363 - .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(T::DbWeight::get().writes((129_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) - } } // For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: `System::Digest` (r:1 w:1) + /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) - /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:1 w:0) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentSlot` (r:0 w:1) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentSlot` (r:1 w:1) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1) - /// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:0 w:1) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:0) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:0 w:1) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `302` - // Estimated: `4787` - // Minimum execution time: 438_039_000 picoseconds. - Weight::from_parts(439_302_000, 4787) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Measured: `270` + // Estimated: `1755` + // Minimum execution time: 382_223_000 picoseconds. + Weight::from_parts(383_656_000, 1755) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:1) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:1) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) - /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TemporaryData` (r:1 w:0) + /// Proof: `Sassafras::TemporaryData` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:1) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:1) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsCount` (r:1 w:1) + 
/// Proof: `Sassafras::TicketsCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:1001 w:1000) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Tickets` (r:0 w:1000) + /// Proof: `Sassafras::Tickets` (`max_values`: None, `max_size`: Some(168), added: 2643, mode: `MaxEncodedLen`) /// Storage: `Sassafras::Authorities` (r:0 w:1) /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:9896) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochConfig` (r:0 w:1) - /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) - /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. - /// The range of component `y` is `[1000, 5000]`. + /// The range of component `y` is `[100, 1000]`. fn enact_epoch_change(x: u32, y: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` - // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` - // Minimum execution time: 121_279_846_000 picoseconds. - Weight::from_parts(94_454_851_972, 593350) - // Standard Error: 24_177_301 - .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) - // Standard Error: 601_053 - .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) + // Measured: `590613 + x * (33 ±0) + y * (68 ±0)` + // Estimated: `592099 + x * (33 ±0) + y * (2670 ±0)` + // Minimum execution time: 142_623_107_000 picoseconds. 
+ Weight::from_parts(135_944_664_003, 592099) + // Standard Error: 3_660_095 + .saturating_add(Weight::from_parts(174_904_510, 0).saturating_mul(x.into())) + // Standard Error: 404_219 + .saturating_add(Weight::from_parts(7_440_688, 0).saturating_mul(y.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) - .saturating_add(RocksDbWeight::get().writes(112_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(y.into()))) - .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) - .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(y.into())) } /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::EpochIndex` (r:1 w:0) - /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) - /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessBuf` (r:1 w:0) + /// Proof: `Sassafras::RandomnessBuf` (`max_values`: Some(1), `max_size`: Some(128), added: 623, mode: `MaxEncodedLen`) /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) - /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::NextRandomness` (r:1 w:0) - /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:25 w:25) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) - /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 25]`. + /// Storage: `Sassafras::CounterForTicketsAccumulator` (r:1 w:1) + /// Proof: `Sassafras::CounterForTicketsAccumulator` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsAccumulator` (r:16 w:16) + /// Proof: `Sassafras::TicketsAccumulator` (`max_values`: None, `max_size`: Some(195), added: 2670, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 16]`. 
fn submit_tickets(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `3869` - // Estimated: `5519 + x * (2559 ±0)` - // Minimum execution time: 36_904_934_000 picoseconds. - Weight::from_parts(25_822_957_295, 5519) - // Standard Error: 11_047_832 - .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) - .saturating_add(RocksDbWeight::get().reads(9_u64)) + // Measured: `1029` + // Estimated: `4787 + x * (2670 ±0)` + // Minimum execution time: 52_363_693_000 picoseconds. + Weight::from_parts(38_029_460_770, 4787) + // Standard Error: 15_839_361 + .saturating_add(Weight::from_parts(14_567_084_979, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) - } - /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) - /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn plan_config_change() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_038_000 picoseconds. - Weight::from_parts(4_499_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2670).saturating_mul(x.into())) } /// Storage: `Sassafras::RingContext` (r:1 w:0) /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) - /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierKey` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierKey` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 100]`. fn update_ring_verifier(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 105_121_424_000 picoseconds. - Weight::from_parts(105_527_334_385, 591809) - // Standard Error: 2_933_910 - .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + // Minimum execution time: 135_738_430_000 picoseconds. + Weight::from_parts(135_840_809_672, 591809) + // Standard Error: 3_319_979 + .saturating_add(Weight::from_parts(173_092_727, 0).saturating_mul(x.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -394,32 +290,10 @@ impl WeightInfo for () { /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) fn load_ring_context() -> Weight { // Proof Size summary in bytes: - // Measured: `590485` + // Measured: `590458` // Estimated: `591809` - // Minimum execution time: 44_005_681_000 picoseconds. - Weight::from_parts(44_312_079_000, 591809) + // Minimum execution time: 55_326_215_000 picoseconds. 
+ Weight::from_parts(55_332_809_000, 591809) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) - /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) - /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsIds` (r:0 w:200) - /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) - /// Storage: `Sassafras::TicketsData` (r:0 w:12600) - /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// The range of component `x` is `[1, 100]`. - fn sort_segments(x: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `222 + x * (2060 ±0)` - // Estimated: `4687 + x * (4529 ±0)` - // Minimum execution time: 183_501_000 picoseconds. - Weight::from_parts(183_501_000, 4687) - // Standard Error: 1_426_363 - .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(RocksDbWeight::get().writes((129_u64).saturating_mul(x.into()))) - .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) - } } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 12bcbc1b3392..eac4f29a0683 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -26,6 +26,7 @@ sp-application-crypto = { features = ["bandersnatch-experimental"], workspace = sp-consensus-slots = { workspace = true } sp-core = { features = ["bandersnatch-experimental"], workspace = true } sp-runtime = { workspace = true } +sp-inherents = { workspace = true } [features] default = ["std"] @@ -37,6 +38,7 @@ std = [ "sp-application-crypto/std", "sp-consensus-slots/std", "sp-core/std", + "sp-inherents/std", "sp-runtime/std", ] diff --git a/substrate/primitives/consensus/sassafras/src/digests.rs b/substrate/primitives/consensus/sassafras/src/digests.rs index bac31f57f2da..08889201179b 100644 --- a/substrate/primitives/consensus/sassafras/src/digests.rs +++ b/substrate/primitives/consensus/sassafras/src/digests.rs @@ -18,8 +18,8 @@ //! Sassafras digests structures and helpers. use crate::{ - ticket::TicketClaim, vrf::VrfSignature, AuthorityId, AuthorityIndex, AuthoritySignature, - EpochConfiguration, Randomness, Slot, SASSAFRAS_ENGINE_ID, + vrf::VrfSignature, AuthorityId, AuthorityIndex, AuthoritySignature, Randomness, Slot, + SASSAFRAS_ENGINE_ID, }; use codec::{Decode, Encode, MaxEncodedLen}; @@ -34,14 +34,12 @@ use sp_runtime::{DigestItem, RuntimeDebug}; /// This is mandatory for each block. #[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct SlotClaim { - /// Authority index that claimed the slot. - pub authority_idx: AuthorityIndex, /// Corresponding slot number. pub slot: Slot, + /// Authority index that claimed the slot. + pub authority_idx: AuthorityIndex, /// Slot claim VRF signature. pub vrf_signature: VrfSignature, - /// Ticket auxiliary information for claim check. - pub ticket_claim: Option, } /// Information about the next epoch. 
@@ -53,10 +51,6 @@ pub struct NextEpochDescriptor {
 	pub randomness: Randomness,
 	/// Authorities list.
 	pub authorities: Vec<AuthorityId>,
-	/// Epoch configuration.
-	///
-	/// If not present previous epoch parameters are used.
-	pub config: Option<EpochConfiguration>,
 }
 
 /// Runtime digest entries.
diff --git a/substrate/primitives/consensus/sassafras/src/lib.rs b/substrate/primitives/consensus/sassafras/src/lib.rs
index d7880c4de9e8..5254064f104f 100644
--- a/substrate/primitives/consensus/sassafras/src/lib.rs
+++ b/substrate/primitives/consensus/sassafras/src/lib.rs
@@ -27,6 +27,7 @@ use alloc::vec::Vec;
 use codec::{Decode, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_core::crypto::KeyTypeId;
+use sp_inherents::{InherentIdentifier, MakeFatalError};
 use sp_runtime::{ConsensusEngineId, RuntimeDebug};
 
 pub use sp_consensus_slots::{Slot, SlotDuration};
@@ -39,8 +40,7 @@ pub mod ticket;
 pub mod vrf;
 
 pub use ticket::{
-	ticket_id_threshold, EphemeralPublic, EphemeralSignature, TicketBody, TicketClaim,
-	TicketEnvelope, TicketId,
+	ticket_id_threshold, EphemeralPublic, EphemeralSignature, TicketBody, TicketEnvelope, TicketId,
 };
 
 mod app {
@@ -48,6 +48,15 @@ mod app {
 	app_crypto!(bandersnatch, SASSAFRAS);
 }
 
+/// Errors that can occur while checking the inherent.
+pub type InherentError = MakeFatalError<()>;
+
+/// The type of the inherent.
+pub type InherentType = Vec<TicketEnvelope>;
+
+/// The identifier for the protocol inherent.
+pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"SASSAFRA";
+
 /// Key type identifier.
 pub const KEY_TYPE: KeyTypeId = sp_application_crypto::key_types::SASSAFRAS;
 
@@ -82,45 +91,45 @@ pub type EquivocationProof = sp_consensus_slots::EquivocationProof,
-	/// Epoch configuration.
-	pub config: EpochConfiguration,
 }
 
 /// An opaque type used to represent the key ownership proof at the runtime API boundary.
diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs
index fd025f1d53ea..334d8553da54 100644
--- a/substrate/primitives/consensus/sassafras/src/ticket.rs
+++ b/substrate/primitives/consensus/sassafras/src/ticket.rs
@@ -20,8 +20,12 @@ use crate::vrf::RingVrfSignature;
 
 use codec::{Decode, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
+use sp_core::{bounded::BoundedVec, ConstU32};
 
 pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature};
+use sp_core::U256;
+
+const TICKET_EXTRA_MAX_LEN: u32 = 128;
 
 /// Ticket identifier.
 ///
@@ -30,38 +34,66 @@ pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSign
 /// Because of this, it is also used as the ticket score to compare against
 /// the epoch ticket's threshold to decide if the ticket is worth being considered
 /// for slot assignment (refer to [`ticket_id_threshold`]).
-pub type TicketId = u128;
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Encode, Decode, MaxEncodedLen, TypeInfo)]
+pub struct TicketId(pub [u8; 32]);
+
+impl core::fmt::Debug for TicketId {
+	fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+		write!(f, "{}", sp_core::hexdisplay::HexDisplay::from(&self.0))
+	}
+}
+
+impl From<U256> for TicketId {
+	fn from(value: U256) -> Self {
+		let mut inner = [0; 32];
+		value.to_big_endian(&mut inner);
+		Self(inner)
+	}
+}
+
+impl From<TicketId> for U256 {
+	fn from(ticket: TicketId) -> U256 {
+		U256::from_big_endian(&ticket.0[..])
+	}
+}
 
 /// Ticket data persisted on-chain.
 #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
 pub struct TicketBody {
+	/// Ticket identifier.
+	pub id: TicketId,
 	/// Attempt index.
-	pub attempt_idx: u32,
-	/// Ephemeral public key which gets erased when the ticket is claimed.
-	pub erased_public: EphemeralPublic,
-	/// Ephemeral public key which gets exposed when the ticket is claimed.
-	pub revealed_public: EphemeralPublic,
+	pub attempt: u8,
+	/// User opaque extra data.
+	pub extra: BoundedVec<u8, ConstU32<TICKET_EXTRA_MAX_LEN>>,
+}
+
+impl Ord for TicketBody {
+	fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+		self.id.cmp(&other.id)
+	}
+}
+
+impl PartialOrd for TicketBody {
+	fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+		Some(self.cmp(other))
+	}
 }
 
 /// Ticket ring vrf signature.
 pub type TicketSignature = RingVrfSignature;
 
-/// Ticket envelope used on during submission.
+/// Ticket envelope used during submission.
 #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
 pub struct TicketEnvelope {
-	/// Ticket body.
-	pub body: TicketBody,
+	/// Attempt index.
+	pub attempt: u8,
+	/// User opaque extra data.
+	pub extra: BoundedVec<u8, ConstU32<TICKET_EXTRA_MAX_LEN>>,
 	/// Ring signature.
 	pub signature: TicketSignature,
 }
 
-/// Ticket claim information filled by the block author.
-#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
-pub struct TicketClaim {
-	/// Signature verified via `TicketBody::erased_public`.
-	pub erased_signature: EphemeralSignature,
-}
-
 /// Computes a boundary for [`TicketId`] maximum allowed value for a given epoch.
 ///
 /// Only ticket identifiers below this threshold should be considered as candidates
@@ -80,45 +112,52 @@ pub struct TicketClaim {
 /// For details about the formula and implications refer to
 /// [*probabilities and parameters*](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS#probabilities-and-parameters)
 /// paragraph of the w3f introduction to the protocol.
-// TODO: replace with [RFC-26](https://github.com/polkadot-fellows/RFCs/pull/26)
-// "Tickets Threshold" paragraph once is merged
-pub fn ticket_id_threshold(
-	redundancy: u32,
-	slots: u32,
-	attempts: u32,
-	validators: u32,
-) -> TicketId {
-	let num = redundancy as u64 * slots as u64;
+pub fn ticket_id_threshold(slots: u32, validators: u32, attempts: u8, redundancy: u8) -> TicketId {
 	let den = attempts as u64 * validators as u64;
-	TicketId::max_value()
+	let num = redundancy as u64 * slots as u64;
+	U256::MAX
 		.checked_div(den.into())
 		.unwrap_or_default()
 		.saturating_mul(num.into())
+		.into()
 }
 
 #[cfg(test)]
 mod tests {
 	use super::*;
 
+	fn normalize_u256(bytes: [u8; 32]) -> f64 {
+		let max_u128 = u128::MAX as f64;
+		let base = max_u128 + 1.0;
+		let max = max_u128 * (base + 1.0);
+
+		// Extract two u128 segments from the byte array
+		let h = u128::from_be_bytes(bytes[..16].try_into().unwrap()) as f64;
+		let l = u128::from_be_bytes(bytes[16..].try_into().unwrap()) as f64;
+		(h * base + l) / max
+	}
+
 	// This is a trivial example/check which just better explains the rationale
 	// behind the threshold.
 	//
 	// After reading this, the formula should become obvious.
 	#[test]
 	fn ticket_id_threshold_trivial_check() {
-		// For an epoch with `s` slots we want to accept a number of tickets equal to ~s·r
+		// For an epoch with `s` slots, with a redundancy factor `r`, we want to accept
+		// a number of tickets equal to ~s·r.
 		let redundancy = 2;
 		let slots = 1000;
 		let attempts = 100;
 		let validators = 500;
 
-		let threshold = ticket_id_threshold(redundancy, slots, attempts, validators);
-		let threshold = threshold as f64 / TicketId::MAX as f64;
+		let threshold = ticket_id_threshold(slots, validators, attempts, redundancy);
+		println!("{:?}", threshold);
+		let threshold = normalize_u256(threshold.0);
+		println!("{}", threshold);
 
-		// We expect that the total number of tickets allowed to be submitted
-		// is slots*redundancy
-		let avt = ((attempts * validators) as f64 * threshold) as u32;
-		assert_eq!(avt, slots * redundancy);
+		// We expect that the total number of tickets allowed to be submitted is slots*redundancy
+		let avt = ((attempts as u32 * validators) as f64 * threshold) as u32;
+		assert_eq!(avt, slots * redundancy as u32);
 
 		println!("threshold: {}", threshold);
 		println!("avt = {}", avt);
diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs
index f8def1b5f189..afc001318080 100644
--- a/substrate/primitives/consensus/sassafras/src/vrf.rs
+++ b/substrate/primitives/consensus/sassafras/src/vrf.rs
@@ -17,77 +17,54 @@
 
 //! Utilities related to VRF input, pre-output and signatures.
 
-use crate::{Randomness, TicketBody, TicketId};
-#[cfg(not(feature = "std"))]
-use alloc::vec::Vec;
-use codec::Encode;
+use crate::{Randomness, TicketId};
 use sp_consensus_slots::Slot;
 
 pub use sp_core::bandersnatch::{
-	ring_vrf::{RingProver, RingVerifier, RingVerifierData, RingVrfSignature},
+	ring_vrf::{RingProver, RingVerifier, RingVerifierKey, RingVrfSignature},
 	vrf::{VrfInput, VrfPreOutput, VrfSignData, VrfSignature},
 };
 
 /// Ring VRF domain size for Sassafras consensus.
 pub const RING_VRF_DOMAIN_SIZE: u32 = 2048;
 
+const TICKET_SEAL_CONTEXT: &[u8] = b"sassafras_ticket_seal";
+// const FALLBACK_SEAL_CONTEXT: &[u8] = b"sassafras_fallback_seal";
+const BLOCK_ENTROPY_CONTEXT: &[u8] = b"sassafras_entropy";
+
 /// Bandersnatch VRF [`RingContext`] specialization for Sassafras using [`RING_VRF_DOMAIN_SIZE`].
 pub type RingContext = sp_core::bandersnatch::ring_vrf::RingContext<RING_VRF_DOMAIN_SIZE>;
 
-fn vrf_input_from_data(
-	domain: &[u8],
-	data: impl IntoIterator<Item = impl AsRef<[u8]>>,
-) -> VrfInput {
-	let buf = data.into_iter().fold(Vec::new(), |mut buf, item| {
-		let bytes = item.as_ref();
-		buf.extend_from_slice(bytes);
-		let len = u8::try_from(bytes.len()).expect("private function with well known inputs; qed");
-		buf.push(len);
-		buf
-	});
-	VrfInput::new(domain, buf)
-}
-
-/// VRF input to claim slot ownership during block production.
-pub fn slot_claim_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput {
-	vrf_input_from_data(
-		b"sassafras-claim-v1.0",
-		[randomness.as_slice(), &slot.to_le_bytes(), &epoch.to_le_bytes()],
-	)
+/// VRF input to generate the ticket id.
+pub fn ticket_id_input(randomness: &Randomness, attempt: u8) -> VrfInput {
+	VrfInput::new(b"sassafras", [TICKET_SEAL_CONTEXT, randomness.as_slice(), &[attempt]].concat())
 }
 
-/// Signing-data to claim slot ownership during block production.
-pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData {
-	let input = slot_claim_input(randomness, slot, epoch);
+/// Data to be signed via ring-vrf.
+pub fn ticket_id_sign_data(ticket_id_input: VrfInput, extra_data: &[u8]) -> VrfSignData {
 	VrfSignData::new_unchecked(
-		b"sassafras-slot-claim-transcript-v1.0",
-		Option::<&[u8]>::None,
-		Some(input),
-	)
-}
-
-/// VRF input to generate the ticket id.
-pub fn ticket_id_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput {
-	vrf_input_from_data(
-		b"sassafras-ticket-v1.0",
-		[randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()],
+		b"sassafras-ticket-body-transcript",
+		Some(extra_data),
+		Some(ticket_id_input),
 	)
 }
 
-/// VRF input to generate the revealed key.
-pub fn revealed_key_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput {
-	vrf_input_from_data(
-		b"sassafras-revealed-v1.0",
-		[randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()],
+/// VRF input to produce randomness.
+pub fn block_randomness_input(randomness: &Randomness, slot: Slot) -> VrfInput {
+	// TODO: @davxy: implement as JAM
+	VrfInput::new(
+		b"sassafras",
+		[BLOCK_ENTROPY_CONTEXT, randomness.as_slice(), &slot.to_le_bytes()].concat(),
 	)
 }
 
-/// Data to be signed via ring-vrf.
-pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput) -> VrfSignData {
+/// Signing-data to claim slot ownership during block production.
+pub fn block_randomness_sign_data(randomness: &Randomness, slot: Slot) -> VrfSignData {
+	let input = block_randomness_input(randomness, slot);
 	VrfSignData::new_unchecked(
-		b"sassafras-ticket-body-transcript-v1.0",
-		Some(ticket_body.encode().as_slice()),
-		Some(ticket_id_input),
+		b"sassafras-randomness-transcript",
+		Option::<&[u8]>::None,
+		Some(input),
 	)
 }
 
@@ -97,15 +74,5 @@ pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput
 /// Pre-output should have been obtained from the input directly using the vrf
 /// secret key or from the vrf signature pre-outputs.
 pub fn make_ticket_id(input: &VrfInput, pre_output: &VrfPreOutput) -> TicketId {
-	let bytes = pre_output.make_bytes::<16>(b"ticket-id", input);
-	u128::from_le_bytes(bytes)
-}
-
-/// Make revealed key seed from a given VRF input and pre-output.
-///
-/// Input should have been obtained via [`revealed_key_input`].
-/// Pre-output should have been obtained from the input directly using the vrf
-/// secret key or from the vrf signature pre-outputs.
-pub fn make_revealed_key_seed(input: &VrfInput, pre_output: &VrfPreOutput) -> [u8; 32] {
-	pre_output.make_bytes::<32>(b"revealed-seed", input)
+	TicketId(pre_output.make_bytes::<32>(b"ticket-id", input))
 }
diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs
index 25bf4657030f..3001a1a55541 100644
--- a/substrate/primitives/core/src/bandersnatch.rs
+++ b/substrate/primitives/core/src/bandersnatch.rs
@@ -204,7 +204,7 @@ pub mod vrf {
 	/// This object is used to produce an arbitrary number of verifiable pseudo random
 	/// bytes and is often called pre-output to emphasize that this is not the actual
 	/// output of the VRF but an object capable of generating the output.
-	#[derive(Clone, Debug, PartialEq, Eq)]
+	#[derive(Copy, Clone, Debug, PartialEq, Eq)]
 	pub struct VrfPreOutput(pub(super) bandersnatch_vrfs::VrfPreOut);
 
 	impl Encode for VrfPreOutput {
@@ -227,8 +227,6 @@ pub mod vrf {
 		}
 	}
 
-	impl EncodeLike for VrfPreOutput {}
-
 	impl MaxEncodedLen for VrfPreOutput {
 		fn max_encoded_len() -> usize {
 			<[u8; PREOUT_SERIALIZED_SIZE]>::max_encoded_len()
@@ -503,20 +501,20 @@ pub mod ring_vrf {
 	pub(crate) const RING_SIGNATURE_SERIALIZED_SIZE: usize = 755;
 
 	/// remove as soon as serialization is implemented by the backend
-	pub struct RingVerifierData {
+	pub struct RingVerifierKey {
 		/// Domain size.
 		pub domain_size: u32,
 		/// Verifier key.
 		pub verifier_key: VerifierKey,
 	}
 
-	impl From<RingVerifierData> for RingVerifier {
-		fn from(vd: RingVerifierData) -> RingVerifier {
+	impl From<RingVerifierKey> for RingVerifier {
+		fn from(vd: RingVerifierKey) -> RingVerifier {
 			bandersnatch_vrfs::ring::make_ring_verifier(vd.verifier_key, vd.domain_size as usize)
 		}
 	}
 
-	impl Encode for RingVerifierData {
+	impl Encode for RingVerifierKey {
 		fn encode(&self) -> Vec<u8> {
 			const ERR_STR: &str = "serialization length is constant and checked by test; qed";
 			let mut buf = [0; RING_VERIFIER_DATA_SERIALIZED_SIZE];
@@ -526,7 +524,7 @@ pub mod ring_vrf {
 		}
 	}
 
-	impl Decode for RingVerifierData {
+	impl Decode for RingVerifierKey {
 		fn decode<R: codec::Input>(i: &mut R) -> Result<Self, codec::Error> {
 			const ERR_STR: &str = "serialization length is constant and checked by test; qed";
 			let buf = <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::decode(i)?;
@@ -535,19 +533,19 @@ pub mod ring_vrf {
 				.expect(ERR_STR);
 			let verifier_key = ::deserialize_compressed_unchecked(&mut &buf[4..]).expect(ERR_STR);
 
-			Ok(RingVerifierData { domain_size, verifier_key })
+			Ok(RingVerifierKey { domain_size, verifier_key })
 		}
 	}
 
-	impl EncodeLike for RingVerifierData {}
+	impl EncodeLike for RingVerifierKey {}
 
-	impl MaxEncodedLen for RingVerifierData {
+	impl MaxEncodedLen for RingVerifierKey {
 		fn max_encoded_len() -> usize {
 			<[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::max_encoded_len()
 		}
 	}
 
-	impl TypeInfo for RingVerifierData {
+	impl TypeInfo for RingVerifierKey {
 		type Identity = [u8; RING_VERIFIER_DATA_SERIALIZED_SIZE];
 
 		fn type_info() -> scale_info::Type {
@@ -601,13 +599,13 @@ pub mod ring_vrf {
 		}
 
 		/// Information required for a lazy construction of a ring verifier.
-		pub fn verifier_data(&self, public_keys: &[Public]) -> Option<RingVerifierData> {
+		pub fn verifier_key(&self, public_keys: &[Public]) -> Option<RingVerifierKey> {
 			let mut pks = Vec::with_capacity(public_keys.len());
 			for public_key in public_keys {
 				let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?;
 				pks.push(pk.0.into());
 			}
 
-			Some(RingVerifierData {
+			Some(RingVerifierKey {
 				verifier_key: self.0.verifier_key(pks),
 				domain_size: self.0.domain_size,
 			})
@@ -1070,19 +1068,19 @@ mod tests {
 	}
 
 	#[test]
-	fn encode_decode_verifier_data() {
+	fn encode_decode_verifier_key() {
 		let ring_ctx = TestRingContext::new_testing();
 
 		let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect();
 		assert!(pks.len() <= ring_ctx.max_keyset_size());
 
-		let verifier_data = ring_ctx.verifier_data(&pks).unwrap();
-		let enc1 = verifier_data.encode();
+		let verifier_key = ring_ctx.verifier_key(&pks).unwrap();
+		let enc1 = verifier_key.encode();
 		assert_eq!(enc1.len(), RING_VERIFIER_DATA_SERIALIZED_SIZE);
-		assert_eq!(RingVerifierData::max_encoded_len(), RING_VERIFIER_DATA_SERIALIZED_SIZE);
+		assert_eq!(RingVerifierKey::max_encoded_len(), RING_VERIFIER_DATA_SERIALIZED_SIZE);
 
-		let vd2 = RingVerifierData::decode(&mut enc1.as_slice()).unwrap();
+		let vd2 = RingVerifierKey::decode(&mut enc1.as_slice()).unwrap();
 		let enc2 = vd2.encode();
 		assert_eq!(enc1, enc2);
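
A minimal illustrative sketch, not part of the changes above: assuming only the items this patch exports from `sp-consensus-sassafras` (`ticket_id_threshold`, the 32-byte big-endian `TicketId` and its `U256` conversions), the hypothetical `is_candidate` helper below shows how a ticket id would be compared against the epoch threshold; because the bytes are stored big-endian, the derived byte-wise ordering agrees with numeric `U256` ordering.

// Hedged sketch (not part of the diff): ticket candidacy check against the epoch threshold.
use sp_consensus_sassafras::{ticket_id_threshold, TicketId};
use sp_core::U256;

fn is_candidate(id: TicketId, slots: u32, validators: u32, attempts: u8, redundancy: u8) -> bool {
	// `ticket_id_threshold` scales U256::MAX by (redundancy * slots) / (attempts * validators).
	let threshold = ticket_id_threshold(slots, validators, attempts, redundancy);
	// Big-endian storage makes the derived lexicographic order match the numeric order.
	debug_assert_eq!(id < threshold, U256::from(id) < U256::from(threshold));
	id < threshold
}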