From 85b3c2e1b9959f11b598c7f191c8526e13963195 Mon Sep 17 00:00:00 2001
From: Aramik
Date: Thu, 9 Nov 2023 16:17:06 -0800
Subject: [PATCH 1/9] Messages v2 included migration (#1738)

# Goal
The goal of this PR is to propose and implement messages v2, compatible with PoV.

Closes #198

# Discussion
- Refactored Messages to minimize the PoV used
- Added storage migration (single block)

# Migration Details
- Based on data from rococo and mainnet and our calculations, we do not need a multi-block migration (only around 15% of the block is being used).
- Was not able to test the upgrade locally due to errors when running relay nodes
- Was able to successfully run the try-runtime CLI tool against rococo

# Checklist
- [x] Chain spec updated
- [x] Design doc(s) updated
- [x] Tests added
- [x] Benchmarks added
- [x] Weights updated
---
 Cargo.lock                                |   1 +
 designdocs/message_storage_v2.md          |  47 +++++
 e2e/capacity/transactions.test.ts         |   4 +-
 e2e/messages/addIPFSMessage.test.ts       |   8 +-
 e2e/package-lock.json                     |   2 +-
 e2e/scaffolding/extrinsicHelpers.ts       |   4 +-
 node/cli/Cargo.toml                       |   2 +
 node/cli/src/command.rs                   |  32 +--
 pallets/messages/src/benchmarking.rs      |  25 +--
 pallets/messages/src/lib.rs               | 112 +++++-----
 pallets/messages/src/migration/mod.rs     |   2 +
 pallets/messages/src/migration/v2.rs      | 143 +++++++++++++
 pallets/messages/src/tests/mock.rs        |   3 +-
 pallets/messages/src/tests/other_tests.rs | 237 +++++++++++-----------
 pallets/messages/src/types.rs             |  11 +-
 runtime/common/src/constants.rs           |   3 -
 runtime/frequency/src/lib.rs              |  13 +-
 17 files changed, 407 insertions(+), 242 deletions(-)
 create mode 100644 designdocs/message_storage_v2.md
 create mode 100644 pallets/messages/src/migration/mod.rs
 create mode 100644 pallets/messages/src/migration/v2.rs

diff --git a/Cargo.lock b/Cargo.lock
index efc307189b..5b97f3b262 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3715,6 +3715,7 @@ dependencies = [
 "cumulus-primitives-core",
 "cumulus-primitives-parachain-inherent",
 "derive_more",
+ "frame-benchmarking",
 "frame-benchmarking-cli",
 "frame-support",
 "frame-system",
diff --git a/designdocs/message_storage_v2.md b/designdocs/message_storage_v2.md
new file mode 100644
index 0000000000..bb2b8c3ec5
--- /dev/null
+++ b/designdocs/message_storage_v2.md
@@ -0,0 +1,47 @@
+# On Chain Message Storage
+
+## Context and Scope
+The proposed feature consists of changes that are going to be one (or more) pallet(s) in the runtime of a
+Substrate-based blockchain, and it will be used in all environments including production.
+
+## Problem Statement
+After the introduction of **Proof of Validity** or **PoV** in runtime weights, all pallets should be
+re-evaluated and refactored if necessary to minimize the usage of **PoV**. This is to ensure all
+important operations are scalable.
+This document proposes changes to the **Messages** pallet to optimize **PoV** size.
+
+## Goals
+- Minimizing Weights, including **execution times** and **PoV** size.
+
+## Proposal
+Storing messages on chain using **BlockNumber**, **SchemaId**, and **MessageIndex** as the primary, secondary,
+and tertiary keys of the [StorageNMap](https://paritytech.github.io/substrate/master/frame_support/storage/trait.StorageNMap.html) data structure provided in Substrate.
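For concreteness, a minimal sketch of the proposed storage item, mirroring the `MessagesV2` declaration added to `pallets/messages/src/lib.rs` in this PR (the `Twox64Concat` hashers match the pallet's existing storage maps; other pallet boilerplate is elided):

```rust
/// Sketch of the triple-keyed message storage proposed above.
#[pallet::storage]
pub(super) type MessagesV2<T: Config> = StorageNMap<
    _,
    (
        storage::Key<Twox64Concat, BlockNumberFor<T>>, // primary key: block number
        storage::Key<Twox64Concat, SchemaId>,          // secondary key: schema id
        storage::Key<Twox64Concat, MessageIndex>,      // tertiary key: message index within the block
    ),
    Message<T::MessagesMaxPayloadSizeBytes>,
    OptionQuery,
>;
```

Because every message is its own storage value, reading or writing a single message no longer pulls an entire per-block `BoundedVec` of messages into the PoV.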
+ +### Main Storage types +- **MessagesV2** + - _Type_: `StorageNMap<(BlockNumber, SchemaId, MessageIndex), Message>` + - _Purpose_: Main structure To store all messages for a certain block number and schema id and + index + + +### On Chain Structure +Following is a proposed data structure for storing a Message on chain. +```rust +/// only `index` is removed from old structure +pub struct Message { + pub payload: Vec, // Serialized data in a user-defined schemas format + pub provider_key: AccountId, // Signature of the signer + pub msa_id: u64, // Message source account id (the original source of the message) +} +``` +## Description + +The idea is to use existing **whitelisted** storage with `BlockMessageIndex` type to store and get +the index of each message to be able to use it as our third key for `StorageNMap`. + +We would store each message separately into `StorageNMap` with following keys +- primary key would be `block_number` +- secondary key would be `schema_id` +- tertiary key would be the `index` of the message for current block which starts from 0 + + diff --git a/e2e/capacity/transactions.test.ts b/e2e/capacity/transactions.test.ts index 1d25e85780..9fd536f6e6 100644 --- a/e2e/capacity/transactions.test.ts +++ b/e2e/capacity/transactions.test.ts @@ -218,7 +218,7 @@ describe('Capacity Transactions', function () { const { eventMap } = await call.payWithCapacity(); assertEvent(eventMap, 'capacity.CapacityWithdrawn'); - assertEvent(eventMap, 'messages.MessagesStored'); + assertEvent(eventMap, 'messages.MessagesInBlock'); }); it('successfully pays with Capacity for eligible transaction - addOnchainMessage', async function () { @@ -227,7 +227,7 @@ describe('Capacity Transactions', function () { const call = ExtrinsicHelper.addOnChainMessage(capacityKeys, dummySchemaId, '0xdeadbeef'); const { eventMap } = await call.payWithCapacity(); assertEvent(eventMap, 'capacity.CapacityWithdrawn'); - assertEvent(eventMap, 'messages.MessagesStored'); + assertEvent(eventMap, 'messages.MessagesInBlock'); const get = await ExtrinsicHelper.apiPromise.rpc.messages.getBySchemaId(dummySchemaId, { from_block: starting_block, from_index: 0, diff --git a/e2e/messages/addIPFSMessage.test.ts b/e2e/messages/addIPFSMessage.test.ts index 02ebf64482..f88d028cec 100644 --- a/e2e/messages/addIPFSMessage.test.ts +++ b/e2e/messages/addIPFSMessage.test.ts @@ -107,9 +107,7 @@ describe('Add Offchain Message', function () { const f = ExtrinsicHelper.addIPFSMessage(keys, schemaId, ipfs_cid_64, ipfs_payload_len); const { target: event } = await f.fundAndSend(fundingSource); - assert.notEqual(event, undefined, 'should have returned a MessagesStored event'); - assert.deepEqual(event?.data.schemaId, schemaId, 'schema ids should be equal'); - assert.notEqual(event?.data.blockNumber, undefined, 'should have a block number'); + assert.notEqual(event, undefined, 'should have returned a MessagesInBlock event'); }); it('should successfully retrieve added message and returned CID should have Base32 encoding', async function () { @@ -130,9 +128,7 @@ describe('Add Offchain Message', function () { const f = ExtrinsicHelper.addOnChainMessage(keys, dummySchemaId, '0xdeadbeef'); const { target: event } = await f.fundAndSend(fundingSource); - assert.notEqual(event, undefined, 'should have returned a MessagesStored event'); - assert.deepEqual(event?.data.schemaId, dummySchemaId, 'schema ids should be equal'); - assert.notEqual(event?.data.blockNumber, undefined, 'should have a block number'); + assert.notEqual(event, undefined, 'should have 
returned a MessagesInBlock event'); const get = await ExtrinsicHelper.apiPromise.rpc.messages.getBySchemaId(dummySchemaId, { from_block: starting_block, diff --git a/e2e/package-lock.json b/e2e/package-lock.json index 6c3f0f1131..f30e18810d 100644 --- a/e2e/package-lock.json +++ b/e2e/package-lock.json @@ -260,7 +260,7 @@ "node_modules/@frequency-chain/api-augment": { "version": "0.0.0", "resolved": "file:../js/api-augment/dist/frequency-chain-api-augment-0.0.0.tgz", - "integrity": "sha512-wcyYIFMu8I2RiqEs664Acp+IdltgIXSi/5VL7WB64YYt3b9krI6CkXst757sBd0aDuQhIjHX35UOaaOyNdZMvw==", + "integrity": "sha512-y5oeksTwmIpVJgZCWj7D+yVoN4TZggsMA5Gv9YmV5DCgCdpXiF/JQ/DcfEs4JUYIlB/P/ccLJkj4x+TJCYhPoA==", "license": "Apache-2.0", "dependencies": { "@polkadot/api": "^10.9.1", diff --git a/e2e/scaffolding/extrinsicHelpers.ts b/e2e/scaffolding/extrinsicHelpers.ts index c794d3eb6b..3ab94997b1 100644 --- a/e2e/scaffolding/extrinsicHelpers.ts +++ b/e2e/scaffolding/extrinsicHelpers.ts @@ -483,7 +483,7 @@ export class ExtrinsicHelper { return new Extrinsic( () => ExtrinsicHelper.api.tx.messages.addIpfsMessage(schemaId, cid, payload_length), keys, - ExtrinsicHelper.api.events.messages.MessagesStored + ExtrinsicHelper.api.events.messages.MessagesInBlock ); } @@ -668,7 +668,7 @@ export class ExtrinsicHelper { return new Extrinsic( () => ExtrinsicHelper.api.tx.messages.addOnchainMessage(null, schemaId, payload), keys, - ExtrinsicHelper.api.events.messages.MessagesStored + ExtrinsicHelper.api.events.messages.MessagesInBlock ); } diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index cfe2e5b224..4e111f3857 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -29,6 +29,7 @@ cli-opt = { default-features = false, path = "../cli-opt" } # Substrate frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } +frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } @@ -70,6 +71,7 @@ cli = [ "sc-cli", "sc-service", "frame-benchmarking-cli", + "frame-benchmarking", "try-runtime-cli" ] default = ["std", "cli"] diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 3912022cc7..22536f0e0e 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -371,22 +371,24 @@ pub fn run() -> Result<()> { #[cfg(feature = "try-runtime")] Some(Subcommand::TryRuntime(cmd)) => { - use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch}; + use common_runtime::constants::MILLISECS_PER_BLOCK; + use try_runtime_cli::block_building_info::timestamp_with_aura_info; + let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - // we don't need any of the components of new_partial, just a runtime, or a task - // manager to do `async_run`. 
- let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - let task_manager = - sc_service::TaskManager::new(config.tokio_handle.clone(), registry) - .map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; - Ok(( - cmd.run::::ExtendHostFunctions, - >>(), - task_manager, - )) + + type HostFunctions = + (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions); + + // grab the task manager. + let registry = &runner.config().prometheus_config.as_ref().map(|cfg| &cfg.registry); + let task_manager = + sc_service::TaskManager::new(runner.config().tokio_handle.clone(), *registry) + .map_err(|e| format!("Error: {:?}", e))?; + + let info_provider = timestamp_with_aura_info(MILLISECS_PER_BLOCK); + + runner.async_run(|_| { + Ok((cmd.run::(Some(info_provider)), task_manager)) }) }, Some(Subcommand::ExportRuntimeVersion(cmd)) => { diff --git a/pallets/messages/src/benchmarking.rs b/pallets/messages/src/benchmarking.rs index 44ef4466a1..632bad471c 100644 --- a/pallets/messages/src/benchmarking.rs +++ b/pallets/messages/src/benchmarking.rs @@ -15,6 +15,7 @@ use sp_runtime::traits::One; const IPFS_SCHEMA_ID: u16 = 50; const IPFS_PAYLOAD_LENGTH: u32 = 10; +const MAX_MESSAGES_IN_BLOCK: u32 = 500; fn onchain_message(schema_id: SchemaId) -> DispatchResult { let message_source_id = DelegatorId(1); @@ -62,8 +63,6 @@ fn create_schema(location: PayloadLocation) -> DispatchResult { } benchmarks! { - // this is temporary to avoid massive PoV sizes which will break the chain until rework on messages - #[pov_mode = Measured] add_onchain_message { let n in 0 .. T::MessagesMaxPayloadSizeBytes::get() - 1; let message_source_id = DelegatorId(2); @@ -78,21 +77,17 @@ benchmarks! { assert_ok!(T::MsaBenchmarkHelper::set_delegation_relationship(ProviderId(1), message_source_id.into(), [schema_id].to_vec())); let payload = vec![1; n as usize]; - let average_messages_per_block: u32 = T::MaxMessagesPerBlock::get() / 2; - for j in 1 .. average_messages_per_block { + for j in 1 .. MAX_MESSAGES_IN_BLOCK { assert_ok!(onchain_message::(schema_id)); } }: _ (RawOrigin::Signed(caller), Some(message_source_id.into()), schema_id, payload) verify { - assert_eq!( - MessagesPallet::::get_messages( - BlockNumberFor::::one(), schema_id).len(), - average_messages_per_block as usize + assert_eq!(MessagesPallet::::get_messages_by_schema_and_block( + schema_id, PayloadLocation::OnChain, BlockNumberFor::::one()).len(), + MAX_MESSAGES_IN_BLOCK as usize ); } - // this is temporary to avoid massive PoV sizes which will break the chain until rework on messages - #[pov_mode = Measured] add_ipfs_message { let caller: T::AccountId = whitelisted_caller(); let cid = "bafkreidgvpkjawlxz6sffxzwgooowe5yt7i6wsyg236mfoks77nywkptdq".as_bytes().to_vec(); @@ -102,16 +97,14 @@ benchmarks! { assert_ok!(create_schema::(PayloadLocation::IPFS)); } assert_ok!(T::MsaBenchmarkHelper::add_key(ProviderId(1).into(), caller.clone())); - let average_messages_per_block: u32 = T::MaxMessagesPerBlock::get() / 2; - for j in 1 .. average_messages_per_block { + for j in 1 .. 
MAX_MESSAGES_IN_BLOCK { assert_ok!(ipfs_message::(IPFS_SCHEMA_ID)); } }: _ (RawOrigin::Signed(caller),IPFS_SCHEMA_ID, cid, IPFS_PAYLOAD_LENGTH) verify { - assert_eq!( - MessagesPallet::::get_messages( - BlockNumberFor::::one(), IPFS_SCHEMA_ID).len(), - average_messages_per_block as usize + assert_eq!(MessagesPallet::::get_messages_by_schema_and_block( + IPFS_SCHEMA_ID, PayloadLocation::IPFS, BlockNumberFor::::one()).len(), + MAX_MESSAGES_IN_BLOCK as usize ); } diff --git a/pallets/messages/src/lib.rs b/pallets/messages/src/lib.rs index e89010827f..e87677cc8b 100644 --- a/pallets/messages/src/lib.rs +++ b/pallets/messages/src/lib.rs @@ -46,6 +46,8 @@ #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +/// migration module +pub mod migration; #[cfg(test)] mod tests; @@ -77,11 +79,16 @@ pub use weights::*; use cid::Cid; use frame_system::pallet_prelude::*; +const LOG_TARGET: &str = "runtime::messages"; + #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; + /// The current storage version. + pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. @@ -99,10 +106,6 @@ pub mod pallet { /// A type that will supply schema related information. type SchemaProvider: SchemaProvider; - /// The maximum number of messages in a block. - #[pallet::constant] - type MaxMessagesPerBlock: Get; - /// The maximum size of a message payload bytes. #[pallet::constant] type MessagesMaxPayloadSizeBytes: Get + Clone + Debug + MaxEncodedLen; @@ -117,33 +120,32 @@ pub mod pallet { } #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); - /// A permanent storage for messages mapped by block number and schema id. - /// - Keys: BlockNumber, Schema Id - /// - Value: List of Messages - #[pallet::storage] - #[pallet::getter(fn get_messages)] - pub(super) type Messages = StorageDoubleMap< - _, - Twox64Concat, - BlockNumberFor, - Twox64Concat, - SchemaId, - BoundedVec, T::MaxMessagesPerBlock>, - ValueQuery, - >; - /// A temporary storage for getting the index for messages /// At the start of the next block this storage is set to 0 #[pallet::storage] #[pallet::whitelist_storage] #[pallet::getter(fn get_message_index)] - pub(super) type BlockMessageIndex = StorageValue<_, u16, ValueQuery>; + pub(super) type BlockMessageIndex = StorageValue<_, MessageIndex, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn get_messages_v2)] + pub(super) type MessagesV2 = StorageNMap< + _, + ( + storage::Key>, + storage::Key, + storage::Key, + ), + Message, + OptionQuery, + >; #[pallet::error] pub enum Error { - /// Too many messages are added to existing block + /// Deprecated: Too many messages are added to existing block TooManyMessagesInBlock, /// Message payload size is too large @@ -174,6 +176,7 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { + /// Deprecated: please use [`Event::MessagesInBlock`] /// Messages are stored for a specified schema id and block number MessagesStored { /// The schema for these messages @@ -181,6 +184,8 @@ pub mod pallet { /// The block number for these messages block_number: BlockNumberFor, }, + /// Messages stored in the current block + MessagesInBlock, } #[pallet::hooks] @@ -202,14 +207,13 @@ pub mod pallet { /// The actual message content will be on IPFS. 
/// /// # Events - /// * [`Event::MessagesStored`] - In the next block + /// * [`Event::MessagesInBlock`] - Messages Stored in the block /// /// # Errors /// * [`Error::ExceedsMaxMessagePayloadSizeBytes`] - Payload is too large /// * [`Error::InvalidSchemaId`] - Schema not found /// * [`Error::InvalidPayloadLocation`] - The schema is not an IPFS payload location /// * [`Error::InvalidMessageSourceAccount`] - Origin must be from an MSA - /// * [`Error::TooManyMessagesInBlock`] - Block is full of messages already /// * [`Error::TypeConversionOverflow`] - Failed to add the message to storage as it is very full /// * [`Error::UnsupportedCidVersion`] - CID version is not supported (V0) /// * [`Error::InvalidCid`] - Unable to parse provided CID @@ -245,10 +249,7 @@ pub mod pallet { schema_id, current_block, )? { - Self::deposit_event(Event::MessagesStored { - schema_id, - block_number: current_block, - }); + Self::deposit_event(Event::MessagesInBlock); } Ok(()) } else { @@ -259,7 +260,7 @@ pub mod pallet { /// Add an on-chain message for a given schema id. /// /// # Events - /// * [`Event::MessagesStored`] - In the next block + /// * [`Event::MessagesInBlock`] - In the next block /// /// # Errors /// * [`Error::ExceedsMaxMessagePayloadSizeBytes`] - Payload is too large @@ -267,7 +268,6 @@ pub mod pallet { /// * [`Error::InvalidPayloadLocation`] - The schema is not an IPFS payload location /// * [`Error::InvalidMessageSourceAccount`] - Origin must be from an MSA /// * [`Error::UnAuthorizedDelegate`] - Trying to add a message without a proper delegation between the origin and the on_behalf_of MSA - /// * [`Error::TooManyMessagesInBlock`] - Block is full of messages already /// * [`Error::TypeConversionOverflow`] - Failed to add the message to storage as it is very full /// #[pallet::call_index(1)] @@ -316,10 +316,7 @@ pub mod pallet { schema_id, current_block, )? { - Self::deposit_event(Event::MessagesStored { - schema_id, - block_number: current_block, - }); + Self::deposit_event(Event::MessagesInBlock); } Ok(()) @@ -334,7 +331,6 @@ impl Pallet { /// Stores a message for a given schema id. 
/// returns true if it needs to emit an event /// # Errors - /// * [`Error::TooManyMessagesInBlock`] /// * [`Error::TypeConversionOverflow`] /// pub fn add_message( @@ -344,28 +340,17 @@ impl Pallet { schema_id: SchemaId, current_block: BlockNumberFor, ) -> Result { - >::try_mutate( - current_block, - schema_id, - |existing_messages| -> Result { - // first message for any schema_id is going to trigger an event - let need_event = existing_messages.len() == 0; - let index = BlockMessageIndex::::get(); - let msg = Message { - payload, // size is checked on top of extrinsic - provider_msa_id, - msa_id, - index, - }; - - existing_messages - .try_push(msg) - .map_err(|_| Error::::TooManyMessagesInBlock)?; - - BlockMessageIndex::::put(index.saturating_add(1)); - Ok(need_event) - }, - ) + let index = BlockMessageIndex::::get(); + let first = index == 0; + let msg = Message { + payload, // size is checked on top of extrinsic + provider_msa_id, + msa_id, + }; + + >::insert((current_block, schema_id, index), msg); + BlockMessageIndex::::set(index.saturating_add(1)); + Ok(first) } /// Resolve an MSA from an account key(key) @@ -394,12 +379,15 @@ impl Pallet { match schema_payload_location { PayloadLocation::Itemized | PayloadLocation::Paginated => return Vec::new(), - _ => - return >::get(block_number, schema_id) - .into_inner() - .iter() - .map(|msg| msg.map_to_response(block_number_value, schema_payload_location)) - .collect(), + _ => { + let mut messages: Vec<_> = >::iter_prefix((block_number, schema_id)) + .map(|(index, msg)| { + msg.map_to_response(block_number_value, schema_payload_location, index) + }) + .collect(); + messages.sort_by(|a, b| a.index.cmp(&b.index)); + return messages + }, } } diff --git a/pallets/messages/src/migration/mod.rs b/pallets/messages/src/migration/mod.rs new file mode 100644 index 0000000000..c34354a101 --- /dev/null +++ b/pallets/messages/src/migration/mod.rs @@ -0,0 +1,2 @@ +/// migrations to v2 +pub mod v2; diff --git a/pallets/messages/src/migration/v2.rs b/pallets/messages/src/migration/v2.rs new file mode 100644 index 0000000000..ff33b5f418 --- /dev/null +++ b/pallets/messages/src/migration/v2.rs @@ -0,0 +1,143 @@ +use crate::{BlockNumberFor, Config, Message, MessagesV2, Pallet, SchemaId, LOG_TARGET}; +use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, weights::Weight}; +use log; +use sp_runtime::Saturating; + +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +/// old structures and storages +pub mod old { + use super::*; + use common_primitives::msa::MessageSourceId; + use sp_std::fmt::Debug; + + /// old message structure that was stored + #[derive(Default, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)] + #[scale_info(skip_type_params(MaxDataSize))] + #[codec(mel_bound(MaxDataSize: MaxEncodedLen))] + pub struct OldMessage + where + MaxDataSize: Get + Debug, + { + /// Data structured by the associated schema's model. + pub payload: BoundedVec, + /// Message source account id of the Provider. This may be the same id as contained in `msa_id`, + /// indicating that the original source MSA is acting as its own provider. An id differing from that + /// of `msa_id` indicates that `provider_msa_id` was delegated by `msa_id` to send this message on + /// its behalf. + pub provider_msa_id: MessageSourceId, + /// Message source account id (the original source). + pub msa_id: Option, + /// Stores index of message in block to keep total order. 
+ pub index: u16, + } + + /// old permanent storage for messages mapped by block number and schema id. + #[storage_alias] + pub(crate) type Messages = StorageDoubleMap< + Pallet, + Twox64Concat, + BlockNumberFor, + Twox64Concat, + SchemaId, + BoundedVec< + OldMessage<::MessagesMaxPayloadSizeBytes>, + ConstU32<200>, + >, + ValueQuery, + >; +} +/// migration to v2 implementation +pub struct MigrateToV2(PhantomData); + +impl OnRuntimeUpgrade for MigrateToV2 { + fn on_runtime_upgrade() -> Weight { + migrate_to_v2::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + use frame_support::storage::generator::StorageDoubleMap; + log::info!(target: LOG_TARGET, "Running pre_upgrade..."); + + let pallet_prefix = old::Messages::::module_prefix(); + let storage_prefix = old::Messages::::storage_prefix(); + assert_eq!(&b"Messages"[..], pallet_prefix); + assert_eq!(&b"Messages"[..], storage_prefix); + + let mut count = 0u32; + for (_, _, messages) in old::Messages::::iter() { + count += messages.len() as u32; + } + log::info!(target: LOG_TARGET, "Finish pre_upgrade for {:?}", count); + Ok(count.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { + log::info!(target: LOG_TARGET, "Running post_upgrade..."); + + let old_count: u32 = Decode::decode(&mut state.as_slice()) + .expect("the state parameter should be something that was generated by pre_upgrade"); + + let count = old::Messages::::iter().count(); + let moved_count = MessagesV2::::iter().count(); + + log::info!(target: LOG_TARGET, "Finish post_upgrade for {:?}", moved_count); + let onchain_version = Pallet::::on_chain_storage_version(); + + assert_eq!(count, 0usize); + assert_eq!(moved_count, old_count as usize); + assert_eq!(onchain_version, crate::pallet::STORAGE_VERSION); + Ok(()) + } +} +/// migrating to v2 +pub fn migrate_to_v2() -> Weight { + log::info!(target: LOG_TARGET, "Running storage migration..."); + let onchain_version = Pallet::::on_chain_storage_version(); + let current_version = Pallet::::current_storage_version(); + log::info!(target: LOG_TARGET, "onchain_version= {:?}, current_version={:?}", onchain_version, current_version); + + if onchain_version < 2 { + let mut reads = 1u64; + let mut writes = 0u64; + let mut bytes = 0u64; + for (block_number, schema_id, messages) in old::Messages::::drain() { + bytes = bytes.saturating_add(messages.encode().len() as u64); + + for message in &messages { + let new_msg = Message { + provider_msa_id: message.provider_msa_id, + msa_id: message.msa_id, + payload: message.payload.clone(), + }; + bytes = bytes.saturating_add(new_msg.encode().len() as u64); + MessagesV2::::insert((block_number, schema_id, message.index), new_msg); + } + + reads.saturating_inc(); + writes = writes.saturating_add(messages.len() as u64 + 1); + } + + // Set storage version to `2`. + StorageVersion::new(2).put::>(); + writes.saturating_inc(); + + log::info!(target: LOG_TARGET, "Storage migrated to version 2 read={:?}, write={:?}, bytes={:?}", reads, writes, bytes); + let weights = T::DbWeight::get().reads_writes(reads, writes).add_proof_size(bytes); + log::info!(target: LOG_TARGET, "Migration Calculated weights={:?}",weights); + weights + } else { + log::info!( + target: LOG_TARGET, + "Migration did not execute. 
This probably should be removed onchain:{:?}, current:{:?}", + onchain_version, + current_version + ); + T::DbWeight::get().reads(1) + } +} diff --git a/pallets/messages/src/tests/mock.rs b/pallets/messages/src/tests/mock.rs index afcafdbde5..9842e59016 100644 --- a/pallets/messages/src/tests/mock.rs +++ b/pallets/messages/src/tests/mock.rs @@ -67,7 +67,6 @@ impl system::Config for Test { type MaxConsumers = ConstU32<16>; } -pub type MaxMessagesPerBlock = ConstU32<500>; pub type MaxSchemaGrantsPerDelegation = ConstU32<30>; // Needs parameter_types! for the impls below @@ -79,6 +78,7 @@ parameter_types! { // Take care when adding new tests for on-chain (not IPFS) messages that the payload // is not too big. pub const MessagesMaxPayloadSizeBytes: u32 = 73; + } impl std::fmt::Debug for MessagesMaxPayloadSizeBytes { @@ -221,7 +221,6 @@ impl pallet_messages::Config for Test { type SchemaGrantValidator = SchemaGrantValidationHandler; type SchemaProvider = SchemaHandler; type WeightInfo = (); - type MaxMessagesPerBlock = MaxMessagesPerBlock; type MessagesMaxPayloadSizeBytes = MessagesMaxPayloadSizeBytes; /// A set of helper functions for benchmarking. diff --git a/pallets/messages/src/tests/other_tests.rs b/pallets/messages/src/tests/other_tests.rs index 9546f2fb28..8ccc31dde2 100644 --- a/pallets/messages/src/tests/other_tests.rs +++ b/pallets/messages/src/tests/other_tests.rs @@ -1,16 +1,23 @@ use crate::{ - tests::mock::*, BlockMessageIndex, Config, Error, Event as MessageEvent, Message, Messages, + migration::{v2, v2::old::OldMessage}, + tests::mock::*, + BlockMessageIndex, Error, Event as MessageEvent, Message, MessagesV2, }; use codec::Encode; use common_primitives::{messages::MessageResponse, schema::*}; -use frame_support::{assert_err, assert_noop, assert_ok, traits::OnInitialize, BoundedVec}; +use frame_support::{ + assert_err, assert_noop, assert_ok, + pallet_prelude::{GetStorageVersion, StorageVersion}, + traits::OnInitialize, + BoundedVec, +}; use frame_system::{EventRecord, Phase}; use multibase::Base; #[allow(unused_imports)] use pretty_assertions::{assert_eq, assert_ne, assert_str_eq}; use rand::Rng; use serde::Serialize; -use sp_core::{ConstU32, Get}; +use sp_core::ConstU32; use sp_std::vec::Vec; #[derive(Serialize)] @@ -54,18 +61,17 @@ fn populate_messages( let mut counter = 0; for (idx, count) in message_per_block.iter().enumerate() { - let mut list = BoundedVec::default(); for _ in 0..*count { - list.try_push(Message { - msa_id: Some(10), - payload: payload.clone().try_into().unwrap(), - index: counter, - provider_msa_id: 1, - }) - .unwrap(); + MessagesV2::::set( + (idx as u32, schema_id, counter), + Some(Message { + msa_id: Some(10), + payload: payload.clone().try_into().unwrap(), + provider_msa_id: 1, + }), + ); counter += 1; } - Messages::::insert(idx as u32, schema_id, list); } } @@ -127,61 +133,46 @@ fn add_message_should_store_message_in_storage() { )); // assert messages - let list1 = Messages::::get(1, schema_id_1).into_inner(); - let list2 = Messages::::get(1, schema_id_2).into_inner(); - assert_eq!(list1.len(), 1); - assert_eq!(list2.len(), 2); + let msg1 = MessagesV2::::get((1, schema_id_1, 0u16)); + let msg2 = MessagesV2::::get((1, schema_id_2, 1u16)); + let msg3 = MessagesV2::::get((1, schema_id_2, 2u16)); assert_eq!( - list1[0], - Message { + msg1, + Some(Message { msa_id: Some(get_msa_from_account(caller_1)), payload: message_payload_1.try_into().unwrap(), - index: 0, provider_msa_id: get_msa_from_account(caller_1) - } + }) ); assert_eq!( - list2, - vec![ - 
Message { - msa_id: Some(get_msa_from_account(caller_2)), - payload: message_payload_2.try_into().unwrap(), - index: 1, - provider_msa_id: get_msa_from_account(caller_2) - }, - Message { - msa_id: Some(get_msa_from_account(caller_2)), - payload: message_payload_3.try_into().unwrap(), - index: 2, - provider_msa_id: get_msa_from_account(caller_2) - }, - ] + msg2, + Some(Message { + msa_id: Some(get_msa_from_account(caller_2)), + payload: message_payload_2.try_into().unwrap(), + provider_msa_id: get_msa_from_account(caller_2) + }) + ); + + assert_eq!( + msg3, + Some(Message { + msa_id: Some(get_msa_from_account(caller_2)), + payload: message_payload_3.try_into().unwrap(), + provider_msa_id: get_msa_from_account(caller_2) + }) ); // assert events - let events_occured = System::events(); + let events_occurred = System::events(); assert_eq!( - events_occured, - vec![ - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::MessagesPallet(MessageEvent::MessagesStored { - block_number: 1, - schema_id: schema_id_1, - }), - topics: vec![] - }, - EventRecord { - phase: Phase::Initialization, - event: RuntimeEvent::MessagesPallet(MessageEvent::MessagesStored { - block_number: 1, - schema_id: schema_id_2, - }), - topics: vec![] - }, - ] + events_occurred, + vec![EventRecord { + phase: Phase::Initialization, + event: RuntimeEvent::MessagesPallet(MessageEvent::MessagesInBlock), + topics: vec![] + },] ); }); } @@ -248,63 +239,6 @@ fn add_ipfs_message_with_invalid_msa_account_errors() { }); } -#[test] -fn add_message_with_maxed_out_storage_errors() { - new_test_ext().execute_with(|| { - // arrange - let caller_1 = 5; - let schema_id_1: SchemaId = 1; - let message_payload_1 = generate_payload(1, None); - - // act - for _ in 0..::MaxMessagesPerBlock::get() { - assert_ok!(MessagesPallet::add_onchain_message( - RuntimeOrigin::signed(caller_1), - None, - schema_id_1, - message_payload_1.clone() - )); - } - assert_noop!( - MessagesPallet::add_onchain_message( - RuntimeOrigin::signed(caller_1), - None, - schema_id_1, - message_payload_1 - ), - Error::::TooManyMessagesInBlock - ); - }); -} - -#[test] -fn add_ipfs_message_with_maxed_out_storage_errors() { - new_test_ext().execute_with(|| { - // arrange - let caller_1 = 5; - let schema_id_1: SchemaId = IPFS_SCHEMA_ID; - - // act - for _ in 0..::MaxMessagesPerBlock::get() { - assert_ok!(MessagesPallet::add_ipfs_message( - RuntimeOrigin::signed(caller_1), - schema_id_1, - DUMMY_CID_BASE32.to_vec(), - 15 - )); - } - assert_noop!( - MessagesPallet::add_ipfs_message( - RuntimeOrigin::signed(caller_1), - schema_id_1, - DUMMY_CID_BASE32.to_vec(), - 15 - ), - Error::::TooManyMessagesInBlock - ); - }); -} - /// Assert that MessageResponse for IPFS messages returns the payload_length of the offchain message. 
#[test] fn get_messages_by_schema_with_ipfs_payload_location_should_return_offchain_payload_length() { @@ -369,9 +303,8 @@ fn get_messages_by_schema_with_ipfs_payload_location_should_fail_bad_schema() { .unwrap(), msa_id: Some(0), provider_msa_id: 1, - index: 0, }; - let mapped_response = bad_message.map_to_response(0, PayloadLocation::IPFS); + let mapped_response = bad_message.map_to_response(0, PayloadLocation::IPFS, 0); assert_eq!( mapped_response.cid, Some(multibase::encode(Base::Base32Lower, Vec::new()).as_bytes().to_vec()) @@ -399,8 +332,8 @@ fn add_message_via_non_delegate_should_fail() { ); // assert - let list = Messages::::get(1, schema_id_1).into_inner(); - assert_eq!(list.len(), 0); + let msg = MessagesV2::::get((1, schema_id_1, 0)); + assert_eq!(msg, None); }); } @@ -671,8 +604,7 @@ fn validate_cid_unwrap_panics() { fn map_to_response_on_chain() { let payload_vec = b"123456789012345678901234567890".to_vec(); let payload_bounded = BoundedVec::>::try_from(payload_vec.clone()).unwrap(); - let msg = - Message { payload: payload_bounded, provider_msa_id: 10u64, msa_id: None, index: 1u16 }; + let msg = Message { payload: payload_bounded, provider_msa_id: 10u64, msa_id: None }; let expected = MessageResponse { provider_msa_id: 10u64, index: 1u16, @@ -682,7 +614,7 @@ fn map_to_response_on_chain() { cid: None, payload_length: None, }; - assert_eq!(msg.map_to_response(42, PayloadLocation::OnChain), expected); + assert_eq!(msg.map_to_response(42, PayloadLocation::OnChain, 1), expected); } #[test] @@ -690,7 +622,7 @@ fn map_to_response_ipfs() { let cid = DUMMY_CID_SHA512; let payload_tuple: crate::OffchainPayloadType = (multibase::decode(cid).unwrap().1, 10); let payload = BoundedVec::>::try_from(payload_tuple.encode()).unwrap(); - let msg = Message { payload, provider_msa_id: 10u64, msa_id: None, index: 1u16 }; + let msg = Message { payload, provider_msa_id: 10u64, msa_id: None }; let expected = MessageResponse { provider_msa_id: 10u64, index: 1u16, @@ -700,5 +632,68 @@ fn map_to_response_ipfs() { cid: Some(cid.as_bytes().to_vec()), payload_length: Some(10), }; - assert_eq!(msg.map_to_response(42, PayloadLocation::IPFS), expected); + assert_eq!(msg.map_to_response(42, PayloadLocation::IPFS, 1), expected); +} + +#[test] +fn migration_to_v2_should_work_as_expected() { + new_test_ext().execute_with(|| { + // Setup + let schema_id: SchemaId = IPFS_SCHEMA_ID; + let cid = &DUMMY_CID_BASE32[..]; + let message_per_block = vec![3, 4, 5, 6]; + let payload = ( + multibase::decode(sp_std::str::from_utf8(cid).unwrap()).unwrap().1, + IPFS_PAYLOAD_LENGTH, + ) + .encode(); + + let mut counter = 0; + for (idx, count) in message_per_block.iter().enumerate() { + let mut list = BoundedVec::default(); + for _ in 0..*count { + list.try_push(OldMessage { + msa_id: Some(10), + payload: payload.clone().try_into().unwrap(), + index: counter, + provider_msa_id: 1, + }) + .unwrap(); + counter += 1; + } + v2::old::Messages::::insert(idx as u32, schema_id, list); + } + + let _ = v2::migrate_to_v2::(); + + let old_count = v2::old::Messages::::iter().count(); + let new_count = MessagesV2::::iter().count(); + let current_version = MessagesPallet::current_storage_version(); + + assert_eq!(old_count, 0); + assert_eq!(new_count, message_per_block.iter().sum::()); + assert_eq!(current_version, StorageVersion::new(2)); + + let mut total_index = 0u16; + for (block, count) in message_per_block.iter().enumerate() { + for _ in 0..*count { + assert!(MessagesV2::::get((block as u32, schema_id, total_index)).is_some()); + 
total_index += 1; + } + // should not exist + assert!(MessagesV2::::get((block as u32, schema_id, total_index)).is_none()); + } + }); +} + +#[test] +fn migration_to_v2_should_have_correct_prefix() { + new_test_ext().execute_with(|| { + use frame_support::storage::generator::StorageDoubleMap; + let pallet_prefix = v2::old::Messages::::module_prefix(); + let storage_prefix = v2::old::Messages::::storage_prefix(); + + assert_eq!(&b"MessagesPallet"[..], pallet_prefix); + assert_eq!(&b"Messages"[..], storage_prefix); + }); } diff --git a/pallets/messages/src/types.rs b/pallets/messages/src/types.rs index bf5deab6b5..a63a8139ff 100644 --- a/pallets/messages/src/types.rs +++ b/pallets/messages/src/types.rs @@ -9,6 +9,8 @@ use sp_std::{fmt::Debug, prelude::*}; /// Payloads stored offchain contain a tuple of (bytes(the payload reference), payload length). pub type OffchainPayloadType = (Vec, u32); +/// Index of message in the block +pub type MessageIndex = u16; /// A single message type definition. #[derive(Default, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)] @@ -27,8 +29,6 @@ where pub provider_msa_id: MessageSourceId, /// Message source account id (the original source). pub msa_id: Option, - /// Stores index of message in block to keep total order. - pub index: u16, } impl Message @@ -40,11 +40,12 @@ where &self, block_number: BlockNumber, payload_location: PayloadLocation, + index: u16, ) -> MessageResponse { match payload_location { PayloadLocation::OnChain => MessageResponse { provider_msa_id: self.provider_msa_id, - index: self.index, + index, block_number, msa_id: self.msa_id, payload: Some(self.payload.to_vec()), @@ -56,7 +57,7 @@ where OffchainPayloadType::decode(&mut &self.payload[..]).unwrap_or_default(); MessageResponse { provider_msa_id: self.provider_msa_id, - index: self.index, + index, block_number, cid: Some(multibase::encode(Base::Base32Lower, binary_cid).as_bytes().to_vec()), payload_length: Some(payload_length), @@ -66,7 +67,7 @@ where }, // Message types of Itemized and Paginated are retrieved differently _ => MessageResponse { provider_msa_id: self.provider_msa_id, - index: self.index, + index, block_number, msa_id: None, payload: None, diff --git a/runtime/common/src/constants.rs b/runtime/common/src/constants.rs index 3d86b63bd5..24cec370f4 100644 --- a/runtime/common/src/constants.rs +++ b/runtime/common/src/constants.rs @@ -268,9 +268,6 @@ parameter_types! { // -end- Collator Selection Pallet --- // --- Messages Pallet --- -/// The maximum number of messages per block -pub type MessagesMaxPerBlock = ConstU32<200>; - impl Clone for MessagesMaxPayloadSizeBytes { fn clone(&self) -> Self { MessagesMaxPayloadSizeBytes {} diff --git a/runtime/frequency/src/lib.rs b/runtime/frequency/src/lib.rs index b552628a46..0f78d6a7ed 100644 --- a/runtime/frequency/src/lib.rs +++ b/runtime/frequency/src/lib.rs @@ -79,7 +79,7 @@ pub use common_runtime::{ use frame_support::traits::Contains; #[cfg(feature = "try-runtime")] -use frame_support::traits::TryStateSelect; +use frame_support::traits::{TryStateSelect, UpgradeCheckSelect}; /// Interface to collective pallet to propose a proposal. pub struct CouncilProposalProvider; @@ -220,6 +220,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, + (pallet_messages::migration::v2::MigrateToV2,), >; /// Opaque types. 
These are used by the CLI to instantiate machinery that don't need to know @@ -257,7 +258,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 60, + spec_version: 61, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -271,7 +272,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency-rococo"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 60, + spec_version: 61, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -920,8 +921,6 @@ impl pallet_messages::Config for Runtime { type SchemaGrantValidator = Msa; // The type that provides schema info type SchemaProvider = Schemas; - // The maximum number of messages per block - type MaxMessagesPerBlock = MessagesMaxPerBlock; // The maximum message payload in bytes type MessagesMaxPayloadSizeBytes = MessagesMaxPayloadSizeBytes; @@ -1321,9 +1320,9 @@ impl_runtime_apis! { #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(_checks: bool) -> (Weight, Weight) { + fn on_runtime_upgrade(checks: UpgradeCheckSelect) -> (Weight, Weight) { log::info!("try-runtime::on_runtime_upgrade frequency."); - let weight = Executive::try_runtime_upgrade(true).unwrap(); + let weight = Executive::try_runtime_upgrade(checks).unwrap(); (weight, RuntimeBlockWeights::get().max_block) } From 194b1b1e502ed47b915b1736e2affced29520ec9 Mon Sep 17 00:00:00 2001 From: Dmitri <4452412+demisx@users.noreply.github.com> Date: Fri, 10 Nov 2023 05:09:04 -0800 Subject: [PATCH 2/9] Fix GPG Signing error in building bins during release (#1775) # Goal The goal of this PR is to fix GPG binary signing error in the release workflow. 
Closes #1774 --- .github/workflows/release.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2a65dddac8..3464b34452 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -179,6 +179,8 @@ jobs: # arch: arm64 runs-on: ${{matrix.os}} container: ghcr.io/libertydsnp/frequency/ci-base-image:1.0.0 + env: + SIGNING_SUBKEY_FINGERPRINT: B6327D1474C6392032870E8EFA4FD1E73A0FE707 steps: - name: Check Out Repo uses: actions/checkout@v4 @@ -212,10 +214,21 @@ jobs: with: gpg_private_key: ${{secrets.FREQUENCY_PGP_SECRET_SUBKEYS}} passphrase: ${{secrets.FREQUENCY_PGP_MASTER_KEY_PASSWORD}} - fingerprint: B6327D1474C6392032870E8EFA4FD1E73A0FE707 # signing subkey + fingerprint: ${{env.SIGNING_SUBKEY_FINGERPRINT}} + - name: List GPG Keys + run: gpg -k; gpg -K + # The error in this step may be due to expired signing subkey + # See https://github.com/LibertyDSNP/frequency/issues/1695 - name: Generate Binary Signature working-directory: ${{env.BIN_DIR}} - run: gpg --detach-sign --armor ${{env.RELEASE_BIN_FILENAME}} + run: | + gpg --version + gpg --local-user ${{env.SIGNING_SUBKEY_FINGERPRINT}} \ + --sign --armor \ + --pinentry-mode=loopback \ + --passphrase="${{secrets.FREQUENCY_PGP_MASTER_KEY_PASSWORD}}" \ + --detach-sig \ + ${{env.RELEASE_BIN_FILENAME}} - name: Verify Binary working-directory: ${{env.BIN_DIR}} run: gpg --verify ${{env.RELEASE_BIN_FILENAME}}.asc From d1111d9c4681eb1f454aa4c496fdce3a6de41966 Mon Sep 17 00:00:00 2001 From: Aramik Date: Mon, 13 Nov 2023 11:39:31 -0800 Subject: [PATCH 3/9] schemas: PoV compatible changes (#1743) # Goal The goal of this PR is to split schemas and the model into 2 separate storages so that we can limit the size of PoV being accessed from other pallets. 
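As a rough sketch, the split keeps a small `SchemaInfos` record per schema and moves the model bytes into a separate `SchemaPayloads` map, along the lines of the pallet changes below (the `BoundedVec` size bound comes from the pallet's existing config; the name used here is an assumption):

```rust
/// Sketch of the storage split; see pallets/schemas/src/lib.rs in this PR for the real declarations.
#[pallet::storage]
pub(super) type SchemaInfos<T: Config> =
    StorageMap<_, Twox64Concat, SchemaId, SchemaInfo, OptionQuery>;

#[pallet::storage]
pub(super) type SchemaPayloads<T: Config> = StorageMap<
    _,
    Twox64Concat,
    SchemaId,
    BoundedVec<u8, T::SchemaModelMaxBytesBoundedVecLimit>, // size bound from the pallet config (assumed name)
    OptionQuery,
>;
```

Pallets that only need a schema's payload location or settings (e.g. Messages, Stateful Storage) can then read the small `SchemaInfos` entry via `get_schema_info_by_id` without loading the model bytes into the PoV.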
Closes #1742 # Checklist - [x] Chain spec updated - [x] Design doc(s) updated - [x] Tests added - [x] Benchmarks added - [x] Weights updated --------- Co-authored-by: Frequency CI [bot] --- Cargo.lock | 1 + common/primitives/src/schema.rs | 17 ++ designdocs/schema_v2.md | 46 ++++++ e2e/package-lock.json | 2 +- pallets/messages/src/lib.rs | 4 +- pallets/messages/src/tests/mock.rs | 11 ++ pallets/messages/src/weights.rs | 96 +++++------ pallets/schemas/Cargo.toml | 1 + pallets/schemas/src/benchmarking.rs | 6 +- pallets/schemas/src/lib.rs | 71 ++++++-- pallets/schemas/src/migration/mod.rs | 2 + pallets/schemas/src/migration/v2.rs | 138 ++++++++++++++++ pallets/schemas/src/tests/other_tests.rs | 82 +++++++++- pallets/schemas/src/types.rs | 18 +-- pallets/schemas/src/weights.rs | 120 +++++++------- pallets/stateful-storage/src/lib.rs | 8 +- pallets/stateful-storage/src/tests/mock.rs | 16 +- pallets/stateful-storage/src/weights.rs | 180 ++++++++++----------- runtime/frequency/src/lib.rs | 9 +- 19 files changed, 589 insertions(+), 239 deletions(-) create mode 100644 designdocs/schema_v2.md create mode 100644 pallets/schemas/src/migration/mod.rs create mode 100644 pallets/schemas/src/migration/v2.rs diff --git a/Cargo.lock b/Cargo.lock index 5b97f3b262..7db51ad47f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7379,6 +7379,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "numtoa", "pallet-collective", "parity-scale-codec", diff --git a/common/primitives/src/schema.rs b/common/primitives/src/schema.rs index 0bd2d29ecf..7af0022bfd 100644 --- a/common/primitives/src/schema.rs +++ b/common/primitives/src/schema.rs @@ -73,10 +73,27 @@ pub struct SchemaResponse { pub settings: Vec, } +/// RPC Response form for a Schema Info +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq)] +pub struct SchemaInfoResponse { + /// The unique identifier for this Schema + pub schema_id: SchemaId, + /// The model format type for how the schema model is represented + pub model_type: ModelType, + /// The payload location + pub payload_location: PayloadLocation, + /// grants for the schema + pub settings: Vec, +} + /// This allows other pallets to resolve Schema information. With generic SchemaId pub trait SchemaProvider { /// Gets the Schema details associated with this `SchemaId` if any fn get_schema_by_id(schema_id: SchemaId) -> Option; + + /// Gets the Schema Info associated with this `SchemaId` if any + fn get_schema_info_by_id(schema_id: SchemaId) -> Option; } /// This allows other Pallets to check validity of schema ids. diff --git a/designdocs/schema_v2.md b/designdocs/schema_v2.md new file mode 100644 index 0000000000..aa114ca3b2 --- /dev/null +++ b/designdocs/schema_v2.md @@ -0,0 +1,46 @@ +# On Chain Message Storage + +## Context and Scope +The proposed feature consists of changes that is going to be one (or more) pallet(s) in runtime of a +Substrate based blockchain, and it will be used in all environments including production. + +## Problem Statement +After introduction of **Proof of Validity** or **PoV** in runtime weights, all pallets should be +re-evaluated and refactored if necessary to minimize the usage of **PoV**. This is to ensure all +important operations are scalable. +This document tries to propose some changes on **Schemas** pallet to optimize the **PoV** size. + +## Goals +- Minimizing Weights including **execution times** and **PoV** size. 
+ +## Proposal +Split Schemas into `SchemaInfo` and `payload` would allow lower **PoV** when verifying schema existence +or compatibility. + +### Main Storage types +- **SchemaInfos** + - _Type_: `StorageMap` + - _Purpose_: Main structure To store related properties of any schema + index +- **SchemaPayloads** + - _Type_: `StorageMap>` + - _Purpose_: Stores the payload or model for each schema + + +### On Chain Structure +Following is a proposed data structure for storing schema information on chain. +```rust +pub struct SchemaInfo { + /// The type of model (AvroBinary, Parquet, etc.) + pub model_type: ModelType, + /// The payload location + pub payload_location: PayloadLocation, + /// additional control settings for the schema + pub settings: SchemaSettings, +} +``` +### Expected PoV improvements +This PoV improvement would not affect extrinsic weights in this pallet, but it would directly affect any +pallet that is dependent on **Schemas** pallet. Some of these pallets are **Messages** and +**Stateful-Storage**. After these changes we are expecting see to see around 30-60KiB decrease in PoV +for `MaxEncodedLen` mode. diff --git a/e2e/package-lock.json b/e2e/package-lock.json index f30e18810d..f2b313a7ce 100644 --- a/e2e/package-lock.json +++ b/e2e/package-lock.json @@ -260,7 +260,7 @@ "node_modules/@frequency-chain/api-augment": { "version": "0.0.0", "resolved": "file:../js/api-augment/dist/frequency-chain-api-augment-0.0.0.tgz", - "integrity": "sha512-y5oeksTwmIpVJgZCWj7D+yVoN4TZggsMA5Gv9YmV5DCgCdpXiF/JQ/DcfEs4JUYIlB/P/ccLJkj4x+TJCYhPoA==", + "integrity": "sha512-SjELGw36ccBPvWV19CU73HAOU1hiYJfQGqY1G3Qd7MJUuH3EaaB7Qr85dqjKcwIt37L7hYZ29LjddBw9//jRkw==", "license": "Apache-2.0", "dependencies": { "@polkadot/api": "^10.9.1", diff --git a/pallets/messages/src/lib.rs b/pallets/messages/src/lib.rs index e87677cc8b..fafbb5d756 100644 --- a/pallets/messages/src/lib.rs +++ b/pallets/messages/src/lib.rs @@ -234,7 +234,7 @@ pub mod pallet { .try_into() .map_err(|_| Error::::ExceedsMaxMessagePayloadSizeBytes)?; - if let Some(schema) = T::SchemaProvider::get_schema_by_id(schema_id) { + if let Some(schema) = T::SchemaProvider::get_schema_info_by_id(schema_id) { ensure!( schema.payload_location == PayloadLocation::IPFS, Error::::InvalidPayloadLocation @@ -283,7 +283,7 @@ pub mod pallet { let bounded_payload: BoundedVec = payload.try_into().map_err(|_| Error::::ExceedsMaxMessagePayloadSizeBytes)?; - if let Some(schema) = T::SchemaProvider::get_schema_by_id(schema_id) { + if let Some(schema) = T::SchemaProvider::get_schema_info_by_id(schema_id) { ensure!( schema.payload_location == PayloadLocation::OnChain, Error::::InvalidPayloadLocation diff --git a/pallets/messages/src/tests/mock.rs b/pallets/messages/src/tests/mock.rs index 9842e59016..fe55e06983 100644 --- a/pallets/messages/src/tests/mock.rs +++ b/pallets/messages/src/tests/mock.rs @@ -213,6 +213,17 @@ impl SchemaProvider for SchemaHandler { settings: Vec::new(), }) } + + fn get_schema_info_by_id(schema_id: u16) -> Option { + Self::get_schema_by_id(schema_id).and_then(|schema| { + Some(SchemaInfoResponse { + schema_id: schema.schema_id, + settings: schema.settings, + model_type: schema.model_type, + payload_location: schema.payload_location, + }) + }) + } } impl pallet_messages::Config for Test { diff --git a/pallets/messages/src/weights.rs b/pallets/messages/src/weights.rs index afebb58b51..b20bf16e69 100644 --- a/pallets/messages/src/weights.rs +++ b/pallets/messages/src/weights.rs @@ -18,9 +18,9 @@ //! 
Autogenerated weights for pallet_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-d4nrm`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -56,78 +56,78 @@ pub trait WeightInfo { /// Weights for pallet_messages using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `Measured`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) - /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `Measured`) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) - /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `Measured`) - /// Storage: `Messages::Messages` (r:1 w:1) - /// Proof: `Messages::Messages` (`max_values`: None, `max_size`: Some(618624), added: 621099, mode: `Measured`) + /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `MaxEncodedLen`) + /// Storage: `Messages::MessagesV2` (r:0 w:1) + /// Proof: `Messages::MessagesV2` (`max_values`: None, `max_size`: Some(3123), added: 5598, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 3071]`. fn add_onchain_message(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `9664` - // Estimated: `22039` - // Minimum execution time: 47_640_000 picoseconds. - Weight::from_parts(49_189_621, 22039) - // Standard Error: 82 - .saturating_add(Weight::from_parts(938, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) + // Measured: `402` + // Estimated: `12592` + // Minimum execution time: 32_164_000 picoseconds. 
+ Weight::from_parts(33_345_645, 12592) + // Standard Error: 43 + .saturating_add(Weight::from_parts(848, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `Measured`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) - /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `Measured`) - /// Storage: `Messages::Messages` (r:1 w:1) - /// Proof: `Messages::Messages` (`max_values`: None, `max_size`: Some(618624), added: 621099, mode: `Measured`) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Messages::MessagesV2` (r:0 w:1) + /// Proof: `Messages::MessagesV2` (`max_values`: None, `max_size`: Some(3123), added: 5598, mode: `MaxEncodedLen`) fn add_ipfs_message() -> Weight { // Proof Size summary in bytes: - // Measured: `7958` - // Estimated: `20333` - // Minimum execution time: 43_681_000 picoseconds. - Weight::from_parts(45_218_000, 20333) - .saturating_add(T::DbWeight::get().reads(3_u64)) + // Measured: `790` + // Estimated: `12423` + // Minimum execution time: 31_839_000 picoseconds. + Weight::from_parts(32_576_000, 12423) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `Measured`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) - /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `Measured`) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) - /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `Measured`) - /// Storage: `Messages::Messages` (r:1 w:1) - /// Proof: `Messages::Messages` (`max_values`: None, `max_size`: Some(618624), added: 621099, mode: `Measured`) + /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `MaxEncodedLen`) + /// Storage: `Messages::MessagesV2` (r:0 w:1) + /// Proof: `Messages::MessagesV2` (`max_values`: None, `max_size`: Some(3123), added: 5598, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 3071]`. fn add_onchain_message(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `9664` - // Estimated: `22039` - // Minimum execution time: 47_640_000 picoseconds. - Weight::from_parts(49_189_621, 22039) - // Standard Error: 82 - .saturating_add(Weight::from_parts(938, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) + // Measured: `402` + // Estimated: `12592` + // Minimum execution time: 32_164_000 picoseconds. 
+ Weight::from_parts(33_345_645, 12592) + // Standard Error: 43 + .saturating_add(Weight::from_parts(848, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `Measured`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) - /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `Measured`) - /// Storage: `Messages::Messages` (r:1 w:1) - /// Proof: `Messages::Messages` (`max_values`: None, `max_size`: Some(618624), added: 621099, mode: `Measured`) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Messages::MessagesV2` (r:0 w:1) + /// Proof: `Messages::MessagesV2` (`max_values`: None, `max_size`: Some(3123), added: 5598, mode: `MaxEncodedLen`) fn add_ipfs_message() -> Weight { // Proof Size summary in bytes: - // Measured: `7958` - // Estimated: `20333` - // Minimum execution time: 43_681_000 picoseconds. - Weight::from_parts(45_218_000, 20333) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + // Measured: `790` + // Estimated: `12423` + // Minimum execution time: 31_839_000 picoseconds. + Weight::from_parts(32_576_000, 12423) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/pallets/schemas/Cargo.toml b/pallets/schemas/Cargo.toml index 3fba980e08..7474e52357 100644 --- a/pallets/schemas/Cargo.toml +++ b/pallets/schemas/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } +log = { version = "0.4.17", default-features = false } frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } frame-system = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } diff --git a/pallets/schemas/src/benchmarking.rs b/pallets/schemas/src/benchmarking.rs index 2d7d8d5320..79b1396313 100644 --- a/pallets/schemas/src/benchmarking.rs +++ b/pallets/schemas/src/benchmarking.rs @@ -42,7 +42,7 @@ benchmarks! { }: _(RawOrigin::Signed(sender), schema_input, model_type, payload_location) verify { ensure!(SchemasPallet::::get_current_schema_identifier_maximum() > 0, "Created schema count should be > 0"); - ensure!(SchemasPallet::::get_schema(1).is_some(), "Created schema should exist"); + ensure!(SchemasPallet::::get_schema_info(1).is_some(), "Created schema should exist"); } create_schema_via_governance { @@ -55,7 +55,7 @@ benchmarks! 
{ }: _(RawOrigin::Root, sender.clone(), schema_input, model_type, payload_location, BoundedVec::default()) verify { ensure!(SchemasPallet::::get_current_schema_identifier_maximum() > 0, "Created schema count should be > 0"); - ensure!(SchemasPallet::::get_schema(1).is_some(), "Created schema should exist"); + ensure!(SchemasPallet::::get_schema_info(1).is_some(), "Created schema should exist"); } propose_to_create_schema { @@ -80,7 +80,7 @@ benchmarks! { }: _(RawOrigin::Signed(sender), schema_input, model_type, payload_location, BoundedVec::default()) verify { ensure!(SchemasPallet::::get_current_schema_identifier_maximum() > 0, "Created schema count should be > 0"); - ensure!(SchemasPallet::::get_schema(1).is_some(), "Created schema should exist"); + ensure!(SchemasPallet::::get_schema_info(1).is_some(), "Created schema should exist"); } set_max_schema_model_bytes { diff --git a/pallets/schemas/src/lib.rs b/pallets/schemas/src/lib.rs index 6d12bba7dd..c0662f0112 100644 --- a/pallets/schemas/src/lib.rs +++ b/pallets/schemas/src/lib.rs @@ -75,7 +75,9 @@ mod tests; mod benchmarking; #[cfg(feature = "runtime-benchmarks")] use common_primitives::benchmarks::SchemaBenchmarkHelper; - +use common_primitives::schema::SchemaInfoResponse; +/// migration module +pub mod migration; mod types; pub use pallet::*; @@ -85,6 +87,8 @@ pub use weights::*; mod serde; +const LOG_TARGET: &str = "runtime::schemas"; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -184,16 +188,24 @@ pub mod pallet { pub(super) type CurrentSchemaIdentifierMaximum = StorageValue<_, SchemaId, ValueQuery>; + /// Storage for message schema info struct data + /// - Key: Schema Id + /// - Value: [`SchemaInfo`](SchemaInfo) + #[pallet::storage] + #[pallet::getter(fn get_schema_info)] + pub(super) type SchemaInfos = + StorageMap<_, Twox64Concat, SchemaId, SchemaInfo, OptionQuery>; + /// Storage for message schema struct data /// - Key: Schema Id - /// - Value: [`Schema`](Schema) + /// - Value: [`BoundedVec`](BoundedVec) #[pallet::storage] - #[pallet::getter(fn get_schema)] - pub(super) type Schemas = StorageMap< + #[pallet::getter(fn get_schema_payload)] + pub(super) type SchemaPayloads = StorageMap< _, Twox64Concat, SchemaId, - Schema, + BoundedVec, OptionQuery, >; @@ -203,7 +215,7 @@ pub mod pallet { pub initial_max_schema_model_size: u32, /// Phantom type #[serde(skip)] - pub _config: sp_std::marker::PhantomData, + pub _config: PhantomData, } impl sp_std::default::Default for GenesisConfig { @@ -383,7 +395,7 @@ pub mod pallet { >::set(n); } - /// Build the [`Schema`] and insert it into storage + /// Inserts both the [`SchemaInfo`] and Schema Payload into storage /// Updates the `CurrentSchemaIdentifierMaximum` storage pub fn add_schema( model: BoundedVec, @@ -398,23 +410,46 @@ pub mod pallet { set_settings.set(i); } } - let schema = Schema { model_type, model, payload_location, settings: set_settings }; + let schema_info = SchemaInfo { model_type, payload_location, settings: set_settings }; >::set(schema_id); - >::insert(schema_id, schema); + >::insert(schema_id, schema_info); + >::insert(schema_id, model); Ok(schema_id) } /// Retrieve a schema by id pub fn get_schema_by_id(schema_id: SchemaId) -> Option { - if let Some(schema) = Self::get_schema(schema_id) { - let model_vec: Vec = schema.model.into_inner(); - let saved_settings = schema.settings; + match (Self::get_schema_info(schema_id), Self::get_schema_payload(schema_id)) { + (Some(schema_info), Some(payload)) => { + let model_vec: Vec = payload.into_inner(); + let 
saved_settings = schema_info.settings; + let settings = saved_settings.0.iter().collect::>(); + let response = SchemaResponse { + schema_id, + model: model_vec, + model_type: schema_info.model_type, + payload_location: schema_info.payload_location, + settings, + }; + Some(response) + }, + (None, Some(_)) | (Some(_), None) => { + log::error!("Corrupted state for schema {:?}, Should never happen!", schema_id); + None + }, + (None, None) => None, + } + } + + /// Retrieve a schema info by id + pub fn get_schema_info_by_id(schema_id: SchemaId) -> Option { + if let Some(schema_info) = Self::get_schema_info(schema_id) { + let saved_settings = schema_info.settings; let settings = saved_settings.0.iter().collect::>(); - let response = SchemaResponse { + let response = SchemaInfoResponse { schema_id, - model: model_vec, - model_type: schema.model_type, - payload_location: schema.payload_location, + model_type: schema_info.model_type, + payload_location: schema_info.payload_location, settings, }; return Some(response) @@ -532,4 +567,8 @@ impl SchemaProvider for Pallet { fn get_schema_by_id(schema_id: SchemaId) -> Option { Self::get_schema_by_id(schema_id) } + + fn get_schema_info_by_id(schema_id: SchemaId) -> Option { + Self::get_schema_info_by_id(schema_id) + } } diff --git a/pallets/schemas/src/migration/mod.rs b/pallets/schemas/src/migration/mod.rs new file mode 100644 index 0000000000..c34354a101 --- /dev/null +++ b/pallets/schemas/src/migration/mod.rs @@ -0,0 +1,2 @@ +/// migrations to v2 +pub mod v2; diff --git a/pallets/schemas/src/migration/v2.rs b/pallets/schemas/src/migration/v2.rs new file mode 100644 index 0000000000..8a45c05364 --- /dev/null +++ b/pallets/schemas/src/migration/v2.rs @@ -0,0 +1,138 @@ +#[cfg(feature = "try-runtime")] +use crate::types::SCHEMA_STORAGE_VERSION; +use crate::{ + pallet::{SchemaInfos, SchemaPayloads}, + Config, Pallet, SchemaId, SchemaInfo, LOG_TARGET, +}; +use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, weights::Weight}; +use log; +use sp_runtime::Saturating; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +/// old module storages +pub mod old { + use super::*; + use common_primitives::schema::{ModelType, PayloadLocation, SchemaSettings}; + + #[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)] + #[scale_info(skip_type_params(MaxModelSize))] + #[codec(mel_bound(MaxModelSize: MaxEncodedLen))] + /// A structure defining a Schema + pub struct Schema + where + MaxModelSize: Get, + { + /// The type of model (AvroBinary, Parquet, etc.) 
+ pub model_type: ModelType, + /// Defines the structure of the message payload using model_type + pub model: BoundedVec, + /// The payload location + pub payload_location: PayloadLocation, + /// additional control settings for the schema + pub settings: SchemaSettings, + } + + /// Storage for message schema struct data + /// - Key: Schema Id + /// - Value: [`Schema`](Schema) + #[storage_alias] + pub(crate) type Schemas = StorageMap< + Pallet, + Twox64Concat, + SchemaId, + Schema<::SchemaModelMaxBytesBoundedVecLimit>, + OptionQuery, + >; +} + +/// migration to v2 implementation +pub struct MigrateToV2(PhantomData); + +impl OnRuntimeUpgrade for MigrateToV2 { + fn on_runtime_upgrade() -> Weight { + migrate_to_v2::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + log::info!(target: LOG_TARGET, "Running pre_upgrade..."); + let count = old::Schemas::::iter().count() as u32; + log::info!(target: LOG_TARGET, "Finish pre_upgrade for {:?}", count); + Ok(count.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { + log::info!(target: LOG_TARGET, "Running post_upgrade..."); + + let old_count: u32 = Decode::decode(&mut state.as_slice()) + .expect("the state parameter should be something that was generated by pre_upgrade"); + + let current_count = old::Schemas::::iter().count(); + let info_count = SchemaInfos::::iter().count(); + let payload_count = SchemaPayloads::::iter().count(); + + log::info!(target: LOG_TARGET, "Finish post_upgrade for {:?}", info_count); + let onchain_version = Pallet::::on_chain_storage_version(); + + assert_eq!(current_count, 0usize); + assert_eq!(info_count, old_count as usize); + assert_eq!(payload_count, old_count as usize); + assert_eq!(onchain_version, SCHEMA_STORAGE_VERSION); + Ok(()) + } +} + +/// migrating to v2 +pub fn migrate_to_v2() -> Weight { + log::info!(target: LOG_TARGET, "Running storage migration..."); + let onchain_version = Pallet::::on_chain_storage_version(); + let current_version = Pallet::::current_storage_version(); + log::info!(target: LOG_TARGET, "onchain_version= {:?}, current_version={:?}", onchain_version, current_version); + let each_layer_access: u64 = 33 * 16; + + if onchain_version < 2 { + let mut reads = 1u64; + let mut writes = 0u64; + let mut bytes = 0u64; + for (schema_id, schema) in old::Schemas::::drain() { + bytes = bytes.saturating_add(schema.encode().len() as u64); + bytes = bytes.saturating_add(each_layer_access * 3); // three layers in merkle tree + + let info = SchemaInfo { + model_type: schema.model_type, + payload_location: schema.payload_location, + settings: schema.settings, + }; + + bytes = bytes.saturating_add(info.encode().len() as u64); + SchemaInfos::::insert(schema_id, info); + + bytes = bytes.saturating_add(schema.model.len() as u64); + SchemaPayloads::::insert(schema_id, schema.model); + + reads.saturating_inc(); + writes = writes.saturating_add(3); + } + + // Set storage version to `2`. + StorageVersion::new(2).put::>(); + writes.saturating_inc(); + + log::info!(target: LOG_TARGET, "Storage migrated to version 2 read={:?}, write={:?}, bytes={:?}", reads, writes, bytes); + let weights = T::DbWeight::get().reads_writes(reads, writes).add_proof_size(bytes); + log::info!(target: LOG_TARGET, "Migration Calculated weights={:?}",weights); + weights + } else { + log::info!( + target: LOG_TARGET, + "Migration did not execute. 
This probably should be removed onchain:{:?}, current:{:?}", + onchain_version, + current_version + ); + T::DbWeight::get().reads(1) + } +} diff --git a/pallets/schemas/src/tests/other_tests.rs b/pallets/schemas/src/tests/other_tests.rs index 264bf04084..47c259eb13 100644 --- a/pallets/schemas/src/tests/other_tests.rs +++ b/pallets/schemas/src/tests/other_tests.rs @@ -1,7 +1,8 @@ use frame_support::{ assert_noop, assert_ok, dispatch::RawOrigin, - traits::{ChangeMembers, Hash}, + pallet_prelude::GetStorageVersion, + traits::{ChangeMembers, Hash, StorageVersion}, BoundedVec, }; use serial_test::serial; @@ -17,11 +18,15 @@ use common_primitives::{ types::ParquetType, ParquetModel, }, - schema::{ModelType, PayloadLocation, SchemaId, SchemaSetting}, + schema::{ModelType, PayloadLocation, SchemaId, SchemaSetting, SchemaSettings}, }; use sp_runtime::DispatchError::BadOrigin; -use crate::{Config, Error, Event as AnnouncementEvent}; +use crate::{ + migration::v2, + pallet::{SchemaInfos, SchemaPayloads}, + Config, Error, Event as AnnouncementEvent, +}; use super::mock::*; @@ -698,3 +703,74 @@ fn create_schema_with_append_only_setting_and_non_itemized_should_fail() { ); }) } + +#[test] +fn schemas_migration_to_v2_should_work_as_expected() { + new_test_ext().execute_with(|| { + // Arrange + sudo_set_max_schema_size(); + let sender: AccountId = test_public(5); + let schemas = vec![ + r#"{"Name": "Bond", "Code": "007"}"#, + r#"{"type": "num","minimum": -90,"maximum": 90}"#, + r#"{"latitude": 48.858093,"longitude": 2.294694}"#, + ]; + for (idx, fields) in schemas.iter().enumerate() { + assert_ok!(SchemasPallet::create_schema_v2( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(fields), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default() + )); + v2::old::Schemas::::insert( + idx as u16 + 1, + v2::old::Schema { + model_type: ModelType::AvroBinary, + payload_location: PayloadLocation::OnChain, + settings: SchemaSettings::all_disabled(), + model: BoundedVec::try_from(fields.as_bytes().to_vec()) + .expect("should have value"), + }, + ); + } + let old_schema_1 = v2::old::Schemas::::get(1u16).expect("should have value"); + let old_schema_2 = v2::old::Schemas::::get(2u16).expect("should have value"); + let old_schema_3 = v2::old::Schemas::::get(3u16).expect("should have value"); + + // Act + let _ = v2::migrate_to_v2::(); + + // Assert + let old_count = v2::old::Schemas::::iter().count(); + let new_info_count = SchemaInfos::::iter().count(); + let new_payload_count = SchemaPayloads::::iter().count(); + let current_version = SchemasPallet::current_storage_version(); + + assert_eq!(old_count, 0); + assert_eq!(new_info_count, schemas.len()); + assert_eq!(new_payload_count, schemas.len()); + assert_eq!(current_version, StorageVersion::new(2)); + + let schema_info_1 = SchemaInfos::::get(1).expect("should have value"); + let schema_payload_1 = SchemaPayloads::::get(1u16).expect("should have value"); + assert_eq!(schema_info_1.model_type, old_schema_1.model_type); + assert_eq!(schema_info_1.payload_location, old_schema_1.payload_location); + assert_eq!(schema_info_1.settings, old_schema_1.settings); + assert_eq!(schema_payload_1.into_inner(), old_schema_1.model.into_inner()); + + let schema_info_2 = SchemaInfos::::get(2).expect("should have value"); + let schema_payload_2 = SchemaPayloads::::get(2u16).expect("should have value"); + assert_eq!(schema_info_2.model_type, old_schema_2.model_type); + assert_eq!(schema_info_2.payload_location, old_schema_2.payload_location); + 
assert_eq!(schema_info_2.settings, old_schema_2.settings); + assert_eq!(schema_payload_2.into_inner(), old_schema_2.model.into_inner()); + + let schema_info_3 = SchemaInfos::::get(3).expect("should have value"); + let schema_payload_3 = SchemaPayloads::::get(3u16).expect("should have value"); + assert_eq!(schema_info_3.model_type, old_schema_3.model_type); + assert_eq!(schema_info_3.payload_location, old_schema_3.payload_location); + assert_eq!(schema_info_3.settings, old_schema_3.settings); + assert_eq!(schema_payload_3.into_inner(), old_schema_3.model.into_inner()); + }); +} diff --git a/pallets/schemas/src/types.rs b/pallets/schemas/src/types.rs index e0a99af945..c1573e43b7 100644 --- a/pallets/schemas/src/types.rs +++ b/pallets/schemas/src/types.rs @@ -1,28 +1,18 @@ //! Types for the Schema Pallet use codec::{Decode, Encode, MaxEncodedLen}; use common_primitives::schema::{ModelType, PayloadLocation, SchemaSettings}; -use frame_support::{ - traits::{Get, StorageVersion}, - BoundedVec, -}; +use frame_support::traits::StorageVersion; use scale_info::TypeInfo; use sp_std::fmt::Debug; /// Current storage version of the schemas pallet. -pub const SCHEMA_STORAGE_VERSION: StorageVersion = StorageVersion::new(1); +pub const SCHEMA_STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)] -#[scale_info(skip_type_params(MaxModelSize))] -#[codec(mel_bound(MaxModelSize: MaxEncodedLen))] -/// A structure defining a Schema -pub struct Schema -where - MaxModelSize: Get, -{ +/// A structure defining a Schema information (excluding the payload) +pub struct SchemaInfo { /// The type of model (AvroBinary, Parquet, etc.) pub model_type: ModelType, - /// Defines the structure of the message payload using model_type - pub model: BoundedVec, /// The payload location pub payload_location: PayloadLocation, /// additional control settings for the schema diff --git a/pallets/schemas/src/weights.rs b/pallets/schemas/src/weights.rs index 599facf2bc..a64177a2bd 100644 --- a/pallets/schemas/src/weights.rs +++ b/pallets/schemas/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_schemas //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-d4nrm`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -63,37 +63,41 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:0 w:1) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. fn create_schema(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `10399` - // Minimum execution time: 14_031_000 picoseconds. - Weight::from_parts(14_415_000, 10399) - // Standard Error: 47 - .saturating_add(Weight::from_parts(35_161, 0).saturating_mul(m.into())) + // Minimum execution time: 15_733_000 picoseconds. + Weight::from_parts(16_003_000, 10399) + // Standard Error: 43 + .saturating_add(Weight::from_parts(34_311, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:1 w:0) /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:0 w:1) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. fn create_schema_via_governance(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `10399` - // Minimum execution time: 14_110_000 picoseconds. - Weight::from_parts(14_289_000, 10399) + // Minimum execution time: 15_842_000 picoseconds. 
+ Weight::from_parts(16_036_000, 10399) // Standard Error: 47 - .saturating_add(Weight::from_parts(35_148, 0).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(34_330, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Council::Members` (r:1 w:0) /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -110,10 +114,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `230` // Estimated: `12605` - // Minimum execution time: 21_148_000 picoseconds. - Weight::from_parts(9_634_788, 12605) - // Standard Error: 34 - .saturating_add(Weight::from_parts(3_135, 0).saturating_mul(m.into())) + // Minimum execution time: 21_487_000 picoseconds. + Weight::from_parts(11_162_870, 12605) + // Standard Error: 31 + .saturating_add(Weight::from_parts(3_121, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -121,19 +125,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:0 w:1) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. fn create_schema_v2(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `10399` - // Minimum execution time: 13_941_000 picoseconds. - Weight::from_parts(14_046_000, 10399) - // Standard Error: 46 - .saturating_add(Weight::from_parts(35_198, 0).saturating_mul(m.into())) + // Minimum execution time: 15_775_000 picoseconds. + Weight::from_parts(15_884_000, 10399) + // Standard Error: 48 + .saturating_add(Weight::from_parts(34_322, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:0 w:1) /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -141,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_747_000 picoseconds. - Weight::from_parts(6_980_000, 0) + // Minimum execution time: 6_505_000 picoseconds. 
+ Weight::from_parts(6_843_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -153,37 +159,41 @@ impl WeightInfo for () { /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:0 w:1) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. fn create_schema(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `10399` - // Minimum execution time: 14_031_000 picoseconds. - Weight::from_parts(14_415_000, 10399) - // Standard Error: 47 - .saturating_add(Weight::from_parts(35_161, 0).saturating_mul(m.into())) + // Minimum execution time: 15_733_000 picoseconds. + Weight::from_parts(16_003_000, 10399) + // Standard Error: 43 + .saturating_add(Weight::from_parts(34_311, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:1 w:0) /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:0 w:1) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. fn create_schema_via_governance(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `10399` - // Minimum execution time: 14_110_000 picoseconds. - Weight::from_parts(14_289_000, 10399) + // Minimum execution time: 15_842_000 picoseconds. + Weight::from_parts(16_036_000, 10399) // Standard Error: 47 - .saturating_add(Weight::from_parts(35_148, 0).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(34_330, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Council::Members` (r:1 w:0) /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -200,10 +210,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `230` // Estimated: `12605` - // Minimum execution time: 21_148_000 picoseconds. 
- Weight::from_parts(9_634_788, 12605) - // Standard Error: 34 - .saturating_add(Weight::from_parts(3_135, 0).saturating_mul(m.into())) + // Minimum execution time: 21_487_000 picoseconds. + Weight::from_parts(11_162_870, 12605) + // Standard Error: 31 + .saturating_add(Weight::from_parts(3_121, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -211,19 +221,21 @@ impl WeightInfo for () { /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:0 w:1) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. fn create_schema_v2(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `10399` - // Minimum execution time: 13_941_000 picoseconds. - Weight::from_parts(14_046_000, 10399) - // Standard Error: 46 - .saturating_add(Weight::from_parts(35_198, 0).saturating_mul(m.into())) + // Minimum execution time: 15_775_000 picoseconds. + Weight::from_parts(15_884_000, 10399) + // Standard Error: 48 + .saturating_add(Weight::from_parts(34_322, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:0 w:1) /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -231,8 +243,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_747_000 picoseconds. - Weight::from_parts(6_980_000, 0) + // Minimum execution time: 6_505_000 picoseconds. 
+ Weight::from_parts(6_843_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/pallets/stateful-storage/src/lib.rs b/pallets/stateful-storage/src/lib.rs index 31e02a9253..4c3dd6a513 100644 --- a/pallets/stateful-storage/src/lib.rs +++ b/pallets/stateful-storage/src/lib.rs @@ -73,7 +73,7 @@ use common_primitives::{ DelegatorId, MessageSourceId, MsaLookup, MsaValidator, ProviderId, SchemaGrantValidator, }, node::Verify, - schema::{PayloadLocation, SchemaId, SchemaProvider, SchemaResponse, SchemaSetting}, + schema::{PayloadLocation, SchemaId, SchemaInfoResponse, SchemaProvider, SchemaSetting}, stateful_storage::{ ItemizedStoragePageResponse, ItemizedStorageResponse, PageHash, PageId, PaginatedStorageResponse, @@ -697,9 +697,9 @@ impl Pallet { fn check_schema_for_read( schema_id: SchemaId, expected_payload_location: PayloadLocation, - ) -> Result { - let schema = - T::SchemaProvider::get_schema_by_id(schema_id).ok_or(Error::::InvalidSchemaId)?; + ) -> Result { + let schema = T::SchemaProvider::get_schema_info_by_id(schema_id) + .ok_or(Error::::InvalidSchemaId)?; // Ensure that the schema's payload location matches the expected location. ensure!( diff --git a/pallets/stateful-storage/src/tests/mock.rs b/pallets/stateful-storage/src/tests/mock.rs index abfce4f839..aff6be3370 100644 --- a/pallets/stateful-storage/src/tests/mock.rs +++ b/pallets/stateful-storage/src/tests/mock.rs @@ -11,7 +11,10 @@ use common_primitives::{ ProviderId, ProviderLookup, SchemaGrantValidator, }, node::AccountId, - schema::{ModelType, PayloadLocation, SchemaId, SchemaProvider, SchemaResponse, SchemaSetting}, + schema::{ + ModelType, PayloadLocation, SchemaId, SchemaInfoResponse, SchemaProvider, SchemaResponse, + SchemaSetting, + }, }; use frame_support::{ dispatch::DispatchResult, @@ -251,6 +254,17 @@ impl SchemaProvider for SchemaHandler { }), } } + + fn get_schema_info_by_id(schema_id: SchemaId) -> Option { + Self::get_schema_by_id(schema_id).and_then(|schema| { + Some(SchemaInfoResponse { + schema_id: schema.schema_id, + settings: schema.settings, + model_type: schema.model_type, + payload_location: schema.payload_location, + }) + }) + } } impl Clone for MaxPaginatedPageId { diff --git a/pallets/stateful-storage/src/weights.rs b/pallets/stateful-storage/src/weights.rs index 332d612ffc..146c672dd5 100644 --- a/pallets/stateful-storage/src/weights.rs +++ b/pallets/stateful-storage/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_stateful_storage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-d4nrm`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -60,8 +60,8 @@ pub trait WeightInfo { /// Weights for pallet_stateful_storage using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) @@ -71,17 +71,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 5121]`. fn apply_item_actions(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33370` - // Estimated: `77893` - // Minimum execution time: 99_564_000 picoseconds. - Weight::from_parts(97_543_524, 77893) - // Standard Error: 308 - .saturating_add(Weight::from_parts(7_013, 0).saturating_mul(s.into())) + // Measured: `33346` + // Estimated: `45721` + // Minimum execution time: 97_596_000 picoseconds. + Weight::from_parts(95_196_204, 45721) + // Standard Error: 469 + .saturating_add(Weight::from_parts(7_119, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) @@ -91,17 +91,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 1024]`. fn upsert_page(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `416` - // Estimated: `77893` - // Minimum execution time: 30_143_000 picoseconds. - Weight::from_parts(31_160_288, 77893) - // Standard Error: 198 - .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(s.into())) + // Measured: `392` + // Estimated: `12767` + // Minimum execution time: 29_612_000 picoseconds. + Weight::from_parts(31_961_486, 12767) + // Standard Error: 972 + .saturating_add(Weight::from_parts(596, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) @@ -110,61 +110,61 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) fn delete_page() -> Weight { // Proof Size summary in bytes: - // Measured: `1575` - // Estimated: `77893` - // Minimum execution time: 35_161_000 picoseconds. 
- Weight::from_parts(36_319_000, 77893) + // Measured: `1551` + // Estimated: `13926` + // Minimum execution time: 32_983_000 picoseconds. + Weight::from_parts(33_821_000, 13926) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// The range of component `s` is `[1, 5121]`. fn apply_item_actions_with_signature(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33377` - // Estimated: `77893` - // Minimum execution time: 157_264_000 picoseconds. - Weight::from_parts(150_585_335, 77893) - // Standard Error: 471 - .saturating_add(Weight::from_parts(14_094, 0).saturating_mul(s.into())) + // Measured: `33353` + // Estimated: `45728` + // Minimum execution time: 155_737_000 picoseconds. + Weight::from_parts(153_426_516, 45728) + // Standard Error: 639 + .saturating_add(Weight::from_parts(12_619, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// The range of component `s` is `[1, 1024]`. fn upsert_page_with_signature(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `349` - // Estimated: `77893` - // Minimum execution time: 84_281_000 picoseconds. - Weight::from_parts(86_715_032, 77893) - // Standard Error: 637 - .saturating_add(Weight::from_parts(6_519, 0).saturating_mul(s.into())) + // Measured: `325` + // Estimated: `12700` + // Minimum execution time: 83_880_000 picoseconds. 
+ Weight::from_parts(85_475_027, 12700) + // Standard Error: 357 + .saturating_add(Weight::from_parts(5_950, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) fn delete_page_with_signature() -> Weight { // Proof Size summary in bytes: - // Measured: `1508` - // Estimated: `77893` - // Minimum execution time: 87_878_000 picoseconds. - Weight::from_parts(89_132_000, 77893) + // Measured: `1484` + // Estimated: `13859` + // Minimum execution time: 87_068_000 picoseconds. + Weight::from_parts(87_902_000, 13859) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -172,8 +172,8 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) @@ -183,17 +183,17 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 5121]`. fn apply_item_actions(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33370` - // Estimated: `77893` - // Minimum execution time: 99_564_000 picoseconds. - Weight::from_parts(97_543_524, 77893) - // Standard Error: 308 - .saturating_add(Weight::from_parts(7_013, 0).saturating_mul(s.into())) + // Measured: `33346` + // Estimated: `45721` + // Minimum execution time: 97_596_000 picoseconds. + Weight::from_parts(95_196_204, 45721) + // Standard Error: 469 + .saturating_add(Weight::from_parts(7_119, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) @@ -203,17 +203,17 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 1024]`. 
fn upsert_page(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `416` - // Estimated: `77893` - // Minimum execution time: 30_143_000 picoseconds. - Weight::from_parts(31_160_288, 77893) - // Standard Error: 198 - .saturating_add(Weight::from_parts(1_041, 0).saturating_mul(s.into())) + // Measured: `392` + // Estimated: `12767` + // Minimum execution time: 29_612_000 picoseconds. + Weight::from_parts(31_961_486, 12767) + // Standard Error: 972 + .saturating_add(Weight::from_parts(596, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) @@ -222,61 +222,61 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) fn delete_page() -> Weight { // Proof Size summary in bytes: - // Measured: `1575` - // Estimated: `77893` - // Minimum execution time: 35_161_000 picoseconds. - Weight::from_parts(36_319_000, 77893) + // Measured: `1551` + // Estimated: `13926` + // Minimum execution time: 32_983_000 picoseconds. + Weight::from_parts(33_821_000, 13926) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// The range of component `s` is `[1, 5121]`. fn apply_item_actions_with_signature(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33377` - // Estimated: `77893` - // Minimum execution time: 157_264_000 picoseconds. - Weight::from_parts(150_585_335, 77893) - // Standard Error: 471 - .saturating_add(Weight::from_parts(14_094, 0).saturating_mul(s.into())) + // Measured: `33353` + // Estimated: `45728` + // Minimum execution time: 155_737_000 picoseconds. 
+ Weight::from_parts(153_426_516, 45728) + // Standard Error: 639 + .saturating_add(Weight::from_parts(12_619, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// The range of component `s` is `[1, 1024]`. fn upsert_page_with_signature(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `349` - // Estimated: `77893` - // Minimum execution time: 84_281_000 picoseconds. - Weight::from_parts(86_715_032, 77893) - // Standard Error: 637 - .saturating_add(Weight::from_parts(6_519, 0).saturating_mul(s.into())) + // Measured: `325` + // Estimated: `12700` + // Minimum execution time: 83_880_000 picoseconds. + Weight::from_parts(85_475_027, 12700) + // Standard Error: 357 + .saturating_add(Weight::from_parts(5_950, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Schemas::Schemas` (r:1 w:0) - /// Proof: `Schemas::Schemas` (`max_values`: None, `max_size`: Some(65518), added: 67993, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) fn delete_page_with_signature() -> Weight { // Proof Size summary in bytes: - // Measured: `1508` - // Estimated: `77893` - // Minimum execution time: 87_878_000 picoseconds. - Weight::from_parts(89_132_000, 77893) + // Measured: `1484` + // Estimated: `13859` + // Minimum execution time: 87_068_000 picoseconds. + Weight::from_parts(87_902_000, 13859) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/runtime/frequency/src/lib.rs b/runtime/frequency/src/lib.rs index 0f78d6a7ed..4902df6526 100644 --- a/runtime/frequency/src/lib.rs +++ b/runtime/frequency/src/lib.rs @@ -220,7 +220,10 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - (pallet_messages::migration::v2::MigrateToV2,), + ( + pallet_messages::migration::v2::MigrateToV2, + pallet_schemas::migration::v2::MigrateToV2, + ), >; /// Opaque types. 
These are used by the CLI to instantiate machinery that don't need to know @@ -258,7 +261,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 61, + spec_version: 62, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -272,7 +275,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency-rococo"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 61, + spec_version: 62, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 8f328eca9b6e2df79fab9fefd9010862ba445e59 Mon Sep 17 00:00:00 2001 From: Wil Wade Date: Mon, 13 Nov 2023 16:01:12 -0500 Subject: [PATCH 4/9] Only publish the npm package api-augment to latest on full releases (#1777) # Goal The goal of this PR is to update CI publishing of Api-Augment on RC releases. Closes #1776 --- .github/workflows/release.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3464b34452..e264313507 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1031,8 +1031,17 @@ jobs: run: npm version --new-version "${{env.NEW_RELEASE_TAG}}" --no-git-tag-version working-directory: js/api-augment/dist - name: Release on NPM @latest - if: env.TEST_RUN != 'true' + if: env.TEST_RUN != 'true' && + steps.is-full-release.outputs.is-full-release == 'true' run: npm publish --tag latest --access public working-directory: ./js/api-augment/dist env: NODE_AUTH_TOKEN: ${{secrets.NODE_AUTH_TOKEN}} + - name: Release Candidate on NPM + if: env.TEST_RUN != 'true' && + steps.is-full-release.outputs.is-full-release != 'true' + run: npm publish --access public + working-directory: ./js/api-augment/dist + env: + NODE_AUTH_TOKEN: ${{secrets.NODE_AUTH_TOKEN}} + From ff609fd9f3206752e7851675c600216caf56e51f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Nov 2023 08:13:05 -0500 Subject: [PATCH 5/9] Bump actions/github-script from 6 to 7 (#1783) Bumps [actions/github-script](https://github.com/actions/github-script) from 6 to 7. 
--- .github/workflows/merge-pr.yml | 2 +- .github/workflows/publish-dev-ci-base-image.yml | 2 +- .github/workflows/release.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/merge-pr.yml b/.github/workflows/merge-pr.yml index 6c80bc3bff..85a8599bc5 100644 --- a/.github/workflows/merge-pr.yml +++ b/.github/workflows/merge-pr.yml @@ -100,7 +100,7 @@ jobs: username: ${{github.actor}} password: ${{secrets.GITHUB_TOKEN}} - name: Sanitize repo owner slug - uses: actions/github-script@v6 + uses: actions/github-script@v7 id: repo_slug with: result-encoding: string diff --git a/.github/workflows/publish-dev-ci-base-image.yml b/.github/workflows/publish-dev-ci-base-image.yml index 9a05a2c1ad..8836c774d2 100644 --- a/.github/workflows/publish-dev-ci-base-image.yml +++ b/.github/workflows/publish-dev-ci-base-image.yml @@ -35,7 +35,7 @@ jobs: username: ${{github.actor}} password: ${{secrets.GITHUB_TOKEN}} - name: Sanitize repo owner slug - uses: actions/github-script@v6 + uses: actions/github-script@v7 id: repo_slug with: result-encoding: string diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e264313507..4aa05a49c9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -786,7 +786,7 @@ jobs: message: Latest full release - name: Dispatch Auto-signer Workflow if: env.TEST_RUN != 'true' - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: github-token: ${{secrets.GHA_WORKFLOW_TRIGGER}} script: | From 0497350b10f4507422cf1afecc140c81adc96da5 Mon Sep 17 00:00:00 2001 From: Joe Caputo Date: Tue, 14 Nov 2023 15:55:06 -0500 Subject: [PATCH 6/9] feat: change instant-seal node docker image to use startup script (#1781) # Goal The goal of this PR is to allow the use of interval sealing mode in CI pipelines using GitHub workflows. The following considerations were taken into account: * Since GitHub workflows do not allow overriding the `CMD` of a Docker image, and overriding `ENTRYPOINT` in GitHub likewise does not allow for passing arguments to the new entrypoint, the simplest option left is to use environment variables. * Since it was deemed undesirable to modify the frequency binary itself to honor environment variables, the approach taken was to create a new image, `frequencychain/standalone-node`. The `ENTRYPOINT` of the image is a shell script instead of the `frequency` binary directly. The shell script takes into account the following variables: * `SEALING_MODE`: `instant`, `interval`, or `manual` * `SEALING_INTERVAL`: number of seconds between blocks in `interval` sealing mode * `CREATE_EMPTY_BLOCKS`: `true` means pass `--sealing-create-empty-blocks` to allow interval sealing mode to form empty blocks as in a live chain. Additional arguments passed to the script are passed through to the underlying `frequency` command-line, just as the `CMD` parameter in the old image. Existing use cases of this image should therefore be unaffected. * The existing `frequencychain/instant-seal-node` image is now deprecated and will eventually be discontinued/removed. 
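For example, a CI job that cannot override `CMD` can select interval sealing purely through environment variables. The sketch below is illustrative only (the `latest` tag, the 12-second interval, and empty-block creation are example choices, not defaults of the image):

```sh
# Run the standalone node in interval sealing mode via environment variables
# instead of overriding the container CMD. Values shown are examples.
docker run --rm -p 9944:9944 \
  -e SEALING_MODE=interval \
  -e SEALING_INTERVAL=12 \
  -e CREATE_EMPTY_BLOCKS=true \
  frequencychain/standalone-node:latest
```

Because extra arguments are still forwarded to the underlying `frequency` command line, this environment-variable mechanism can be combined with the existing argument-override usage where `CMD` overrides are available.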
Closes #1780 # Discussion # Checklist - [x] Doc(s) updated - [x] Tested new image for all sealing modes: `instant`, `interval`, `manual` --------- Co-authored-by: Wil Wade --- .github/workflows/release.yml | 6 +- .github/workflows/verify-pr-commit.yml | 8 +++ docker/frequency-start.sh | 31 +++++++++++ docker/instant-seal-node.overview.md | 7 +++ docker/standalone-node.dockerfile | 34 ++++++++++++ docker/standalone-node.overview.md | 77 ++++++++++++++++++++++++++ 6 files changed, 162 insertions(+), 1 deletion(-) create mode 100755 docker/frequency-start.sh create mode 100644 docker/standalone-node.dockerfile create mode 100644 docker/standalone-node.overview.md diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4aa05a49c9..5c0cfdb5dc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -896,7 +896,7 @@ jobs: fail-fast: true matrix: arch: [amd64] - node: [collator-node-local, instant-seal-node] + node: [collator-node-local, instant-seal-node, standalone-node] include: - node: collator-node-local network: local @@ -906,6 +906,10 @@ jobs: network: dev build-profile: release release-file-name-prefix: frequency-dev + - node: standalone-node + network: dev + build-profile: release + release-file-name-prefix: frequency-dev - arch: amd64 build-profile: release docker-platform: linux/amd64 diff --git a/.github/workflows/verify-pr-commit.yml b/.github/workflows/verify-pr-commit.yml index 78b50488de..8f93a6e204 100644 --- a/.github/workflows/verify-pr-commit.yml +++ b/.github/workflows/verify-pr-commit.yml @@ -488,6 +488,14 @@ jobs: context: . push: false file: ./docker/${{env.IMAGE_NAME}}.dockerfile + - name: Build collator standalone + env: + IMAGE_NAME: standalone-node + uses: docker/build-push-action@v5 + with: + context: . + push: false + file: ./docker/${{env.IMAGE_NAME}}.dockerfile - name: Build collator image for local relay chain env: IMAGE_NAME: collator-node-local diff --git a/docker/frequency-start.sh b/docker/frequency-start.sh new file mode 100755 index 0000000000..80a4d414f5 --- /dev/null +++ b/docker/frequency-start.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +if [ -z "${SEALING_MODE}" ] +then + SEALING_MODE=instant +fi + +if [ -n "${SEALING_INTERVAL}" ] +then + SEALING_INTERVAL="--sealing-interval=${SEALING_INTERVAL}" +fi + +if [ "${CREATE_EMPTY_BLOCKS}" = true ] +then + CREATE_EMPTY_BLOCKS="--sealing-create-empty-blocks" +fi + +exec /frequency/frequency \ + --dev \ + -lruntime=debug \ + --no-telemetry \ + --no-prometheus \ + --rpc-port=9944 \ + --rpc-external \ + --rpc-cors=all \ + --rpc-methods=Unsafe \ + --base-path=/data \ + --sealing=${SEALING_MODE} \ + ${SEALING_INTERVAL} \ + ${CREATE_EMPTY_BLOCKS} \ + $* diff --git a/docker/instant-seal-node.overview.md b/docker/instant-seal-node.overview.md index 58d7521931..2b77a413c6 100644 --- a/docker/instant-seal-node.overview.md +++ b/docker/instant-seal-node.overview.md @@ -1,5 +1,12 @@ # Frequency Collator Node in Local Only Sealing Mode +## Deprecation Note + +This image has been deprecated and will be removed in a future Frequency release. The new image, [frequencychain/standalone-node](./standalone-node.overview.md), replaces it. Current usage patterns for this image are fully supported by the new image, as well as additional enhancements. +## + +### Description + Runs just one collator node that will not connect to any other nodes. Defaults to running in instant sealing mode where a block will be triggered when a transaction enters the validated transaction pool. 
A "collator node" is a Frequency parachain node that is actively collating (aka forming blocks to submit to the relay chain, although in this case without a relay chain). diff --git a/docker/standalone-node.dockerfile b/docker/standalone-node.dockerfile new file mode 100644 index 0000000000..f5d8a5f614 --- /dev/null +++ b/docker/standalone-node.dockerfile @@ -0,0 +1,34 @@ +# Docker image for running Frequency parachain node container (with collating) +# locally as a standalone node. Requires to run from repository root and to copy +# the binary in the build folder. +# This is the build stage for Polkadot. Here we create the binary in a temporary image. +FROM --platform=linux/amd64 ubuntu:20.04 AS base + +LABEL maintainer="Frequency" +LABEL description="Frequency standalone node" + +RUN apt-get update && apt-get install -y ca-certificates && update-ca-certificates + +# This is the 2nd stage: a very small image where we copy the Frequency binary +FROM --platform=linux/amd64 ubuntu:20.04 + +RUN useradd -m -u 1000 -U -s /bin/sh -d /frequency frequency && \ + mkdir -p /data /frequency/.local/share && \ + chown -R frequency:frequency /data && \ + ln -s /data /frequency/.local/share/frequency + +USER frequency + +COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +# For local testing only +# COPY --chown=frequency target/x86_64-unknown-linux-gnu/debug/frequency ./frequency/frequency +COPY --chown=frequency target/release/frequency ./frequency/ +COPY --chown=frequency docker/frequency-start.sh ./frequency/ +RUN chmod +x ./frequency/frequency ./frequency/frequency-start.sh + +# 9944 for RPC call +EXPOSE 9944 + +VOLUME ["/data"] + +ENTRYPOINT [ "/frequency/frequency-start.sh" ] diff --git a/docker/standalone-node.overview.md b/docker/standalone-node.overview.md new file mode 100644 index 0000000000..41a2156158 --- /dev/null +++ b/docker/standalone-node.overview.md @@ -0,0 +1,77 @@ +# Frequency Collator Node in Local Only Sealing Mode + +Runs just one collator node that will not connect to any other nodes. +Defaults to running in instant sealing mode where a block will be triggered when a transaction enters the validated transaction pool. +A "collator node" is a Frequency parachain node that is actively collating (aka forming blocks to submit to the relay chain, although in this case without a relay chain). + +### Quick Run + +```sh +docker run --rm -p 9944:9944 frequencychain/standalone-node: +``` + + +## Trigger Block Manually + +If running in manual sealing mode or to form empty blocks in instant sealing mode, the `engine_createBlock` RPC can be used: + +```sh +curl http://localhost:9944 -H "Content-Type:application/json;charset=utf-8" -d '{ \ + "jsonrpc":"2.0", \ + "id":1, \ + "method":"engine_createBlock", \ + "params": [true, true] \ + }' +``` + + +## Default Arguments + +| Argument | Description | +| --- | --- | +| `--dev` | Specify the development chain. This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and `--tmp` flags, unless explicitly overridden| +| `-lruntime=debug` | Sets a the custom logging filter for the 'runtime' target to 'debug'. Syntax is `=`, e.g. -lsync=debug. Log levels (least to most verbose) are error, warn, info, debug, and trace. By default, all targets log `info`. The global log level can be set with `-l`| +| `--no-telemetry` | Disable connecting to the Substrate telemetry server. Telemetry is on by default on global chains | +| `--no-prometheus` | Do not expose a Prometheus exporter endpoint. 
+ +### Run + +Note: Docker `--rm` removes the volume when stopped. + +```sh +docker run --rm -p 9944:9944 frequencychain/standalone-node: +``` + +## Environment Variables + +The following environment variables are supported by this image. The same behavior may be requested by overriding the command line arguments in the `CMD` of the container; however, certain use cases (e.g., GitHub Actions) do not support overriding `CMD` when instantiating a container-based service in a workflow. In such a case, injecting these environment variables is a viable workaround. + +| Environment Variable | Possible Values | Description | +| --- | --- | --- | +| `SEALING_MODE` | `instant`, `interval`, `manual` | Overrides `--sealing=SEALING_MODE` | +| `SEALING_INTERVAL` | integer > 0 | Adds `--sealing-interval=SEALING_INTERVAL`. The sealing interval in seconds (in `interval` sealing mode) | +| `CREATE_EMPTY_BLOCKS` | `true` | Adds `--sealing-create-empty-blocks`. Creates empty blocks in `interval` sealing mode | + + +## Overriding Arguments + +| Argument | Description | +| --- | --- | +| `--sealing=manual` | Only form a block when `engine_createBlock` RPC is called | +| `--help` | See all the options possible | + +### Run + +```sh +docker run --rm -p 9944:9944 frequencychain/standalone-node: --sealing=manual +``` + +| **Node** | **Ports** | **Explorer URL** | +| ----------------------- | :-------------------------------: | ----------------------------------------------------------------------------------------- | +| Frequency Local-Only Node | ws and rpc: `9944` | [127.0.0.1:9944](https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A9944#/explorer) | From 36c4e4b5e65c4aaf16bf2a3a7d6c90f294f930af Mon Sep 17 00:00:00 2001 From: eNddy Date: Tue, 14 Nov 2023 15:39:54 -0800 Subject: [PATCH 7/9] chore(*): refactor cargo crate dep (#1770) # Goal Refactor cargo crate dependencies into the workspace. This facilitates updating Polkadot versions.
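The pattern applied throughout the manifests below is standard Cargo workspace-dependency inheritance. A minimal sketch, with `frame-support` standing in for any of the crates touched by this PR:

```toml
# Root Cargo.toml: declare the version/branch once for the whole workspace.
[workspace.dependencies]
frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }

# Member crate Cargo.toml: inherit the workspace definition instead of repeating the git/branch pin.
[dependencies]
frame-support = { workspace = true }
```

With this layout, bumping the Polkadot release becomes a single branch change in the root manifest rather than one edit per crate.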
issue #1751 Co-authored-by: Enddy Dumbrique --- Cargo.lock | 10 -- Cargo.toml | 138 ++++++++++++++++ common/helpers/Cargo.toml | 12 +- common/primitives/Cargo.toml | 28 ++-- common/primitives/src/handles.rs | 2 +- common/primitives/src/macros.rs | 4 +- common/primitives/src/messages.rs | 2 +- common/primitives/src/msa.rs | 10 +- .../src/parquet/column_compression_codec.rs | 2 +- common/primitives/src/rpc.rs | 2 +- common/primitives/src/schema.rs | 2 +- common/primitives/src/stateful_storage.rs | 2 +- common/primitives/src/utils.rs | 2 +- node/Cargo.toml | 2 +- node/cli-opt/Cargo.toml | 2 +- node/cli/Cargo.toml | 75 +++++---- node/service/Cargo.toml | 148 +++++++++--------- node/service/src/block_sealing.rs | 2 +- node/service/src/rpc/tests/mod.rs | 2 +- pallets/capacity/Cargo.toml | 30 ++-- pallets/capacity/src/benchmarking.rs | 2 +- pallets/capacity/src/types.rs | 2 +- pallets/frequency-tx-payment/Cargo.toml | 28 ++-- pallets/frequency-tx-payment/src/lib.rs | 2 +- .../frequency-tx-payment/src/rpc/Cargo.toml | 28 ++-- .../frequency-tx-payment/src/rpc/src/lib.rs | 2 +- .../src/runtime-api/Cargo.toml | 12 +- .../src/runtime-api/src/lib.rs | 2 +- pallets/handles/Cargo.toml | 32 ++-- pallets/handles/src/handles-utils/Cargo.toml | 8 +- .../handles/src/handles_signed_extension.rs | 2 +- pallets/handles/src/rpc/Cargo.toml | 22 ++- pallets/handles/src/runtime-api/Cargo.toml | 8 +- .../handles/src/tests/handle_change_tests.rs | 2 +- .../src/tests/handle_creation_tests.rs | 2 +- .../src/tests/handle_retirements_tests.rs | 2 +- .../src/tests/handles_replay_attack_test.rs | 2 +- pallets/handles/src/tests/mock.rs | 2 +- pallets/messages/Cargo.toml | 28 ++-- pallets/messages/src/lib.rs | 2 +- pallets/messages/src/rpc/Cargo.toml | 18 +-- pallets/messages/src/runtime-api/Cargo.toml | 9 +- pallets/messages/src/tests/mock.rs | 2 +- pallets/messages/src/tests/other_tests.rs | 2 +- pallets/messages/src/types.rs | 2 +- pallets/msa/Cargo.toml | 37 +++-- pallets/msa/src/lib.rs | 2 +- pallets/msa/src/rpc/Cargo.toml | 22 +-- pallets/msa/src/rpc/src/lib.rs | 2 +- pallets/msa/src/runtime-api/Cargo.toml | 10 +- pallets/msa/src/runtime-api/src/lib.rs | 2 +- pallets/msa/src/tests/mock.rs | 2 +- pallets/msa/src/types.rs | 2 +- pallets/schemas/Cargo.toml | 36 ++--- pallets/schemas/src/rpc/Cargo.toml | 24 ++- pallets/schemas/src/runtime-api/Cargo.toml | 12 +- pallets/schemas/src/tests/mock.rs | 2 +- pallets/schemas/src/types.rs | 2 +- pallets/stateful-storage/Cargo.toml | 32 ++-- pallets/stateful-storage/src/benchmarking.rs | 2 +- pallets/stateful-storage/src/rpc/Cargo.toml | 24 ++- .../src/runtime-api/Cargo.toml | 11 +- .../src/stateful_child_tree.rs | 11 +- pallets/stateful-storage/src/test_common.rs | 2 +- .../src/tests/apply_item_actions_tests.rs | 2 +- .../src/tests/delete_page_tests.rs | 2 +- .../src/tests/itemized_operations_tests.rs | 2 +- pallets/stateful-storage/src/tests/mock.rs | 2 +- .../stateful-storage/src/tests/other_tests.rs | 2 +- .../src/tests/upsert_page_tests.rs | 2 +- pallets/stateful-storage/src/types.rs | 2 +- pallets/time-release/Cargo.toml | 24 +-- pallets/time-release/src/types.rs | 2 +- runtime/common/Cargo.toml | 44 +++--- runtime/common/src/constants.rs | 2 +- runtime/common/src/extensions/check_nonce.rs | 2 +- runtime/frequency/Cargo.toml | 106 +++++++------ runtime/frequency/src/lib.rs | 6 +- runtime/system-runtime-api/Cargo.toml | 16 +- 79 files changed, 634 insertions(+), 519 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7db51ad47f..e5ccfe2c8f 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -3729,7 +3729,6 @@ dependencies = [ "pallet-handles", "pallet-msa", "pallet-transaction-payment", - "parity-scale-codec", "polkadot-cli", "polkadot-parachain-primitives", "polkadot-primitives", @@ -6910,7 +6909,6 @@ dependencies = [ "frame-support", "jsonrpsee", "pallet-handles-runtime-api", - "parity-scale-codec", "rayon", "sc-client-api", "sp-api", @@ -6926,7 +6924,6 @@ name = "pallet-handles-runtime-api" version = "0.0.0" dependencies = [ "common-primitives", - "parity-scale-codec", "sp-api", "sp-std", ] @@ -7053,7 +7050,6 @@ dependencies = [ "frame-support", "jsonrpsee", "pallet-messages-runtime-api", - "parity-scale-codec", "sc-client-api", "sp-api", "sp-blockchain", @@ -7068,7 +7064,6 @@ version = "0.0.0" dependencies = [ "common-primitives", "frame-support", - "parity-scale-codec", "sp-api", ] @@ -7404,7 +7399,6 @@ dependencies = [ "frame-support", "jsonrpsee", "pallet-schemas-runtime-api", - "parity-scale-codec", "sc-client-api", "sp-api", "sp-blockchain", @@ -7422,7 +7416,6 @@ version = "0.0.0" dependencies = [ "common-primitives", "frame-support", - "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", @@ -7584,7 +7577,6 @@ dependencies = [ "frame-support", "jsonrpsee", "pallet-stateful-storage-runtime-api", - "parity-scale-codec", "sc-client-api", "sp-api", "sp-blockchain", @@ -7602,7 +7594,6 @@ version = "0.0.0" dependencies = [ "common-primitives", "frame-support", - "parity-scale-codec", "sp-api", "sp-runtime", ] @@ -13593,7 +13584,6 @@ dependencies = [ "common-primitives", "frame-support", "frame-system", - "parity-scale-codec", "serde_json", "sp-api", "sp-runtime", diff --git a/Cargo.toml b/Cargo.toml index af9142b7c5..b699550aab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,144 @@ members = [ ] resolver = "2" +[workspace.dependencies] +log = { version = "0.4.17", default-features = false } +env_logger = "0.10.0" +twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"]} +thiserror = "1.0.40" +apache-avro = { version = "0.14.0", default-features = false } +rand = "0.8.5" + +# substrate wasm +parity-scale-codec = { version = "3.6.1", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +frame-executive = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-runtime = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } +sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-version = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +chrono = { version = "0.4.24" } +pretty_assertions = { version = "1.3.0" } +smallvec = "1.11.0" +numtoa = "0.2.4" +enumflags2 = "0.7.7" +serde = { version = "1.0", default-features = false } +serial_test = { version = "0.9.0", default-features = false } + +# substrate pallets +pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } + +# polkadot +polkadot-cli = { git = "https://github.com/paritytech/polkadot-sdk", 
branch = "release-polkadot-v1.1.0" } +polkadot-parachain-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +polkadot-runtime-common = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +polkadot-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } + +# cumulus +cumulus-client-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-collator = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-consensus-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-consensus-proposer = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-relay-chain-inprocess-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-relay-chain-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-relay-chain-minimal-node = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-relay-chain-rpc-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } + +cumulus-pallet-aura-ext = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +cumulus-pallet-parachain-system = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +cumulus-pallet-session-benchmarking = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +cumulus-primitives-aura = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-primitives-timestamp = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +pallet-collator-selection = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +parachain-info = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } + +# client +derive_more = "0.99.17" +futures = "0.3.25" +hex = "0.4.3" +hex-literal = "0.4.1" +impl-serde = { version = "0.4.0", default-features = false } +jsonrpsee = { version = "0.16.2", default-features = false } +oorandom = "11.1.3" +phf = { version = "0.11", default-features = false, features = ["macros"] } +rayon = "1.5.3" 
+serde_json = { version = "1.0.86", default-features = false } +tokio = { version = "1.25.0", default-features = false } +unicode-normalization = { version = "0.1.22", default-features = false } +clap = { version = "4.2.5", features = ["derive"] } + +frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-client-db = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-executor = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-network-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-sysinfo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = 
"release-polkadot-v1.1.0" } +sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +sp-wasm-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false } +try-runtime-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } + [profile.release] panic = "unwind" lto = true diff --git a/common/helpers/Cargo.toml b/common/helpers/Cargo.toml index 6e84c26f5f..7094698c61 100644 --- a/common/helpers/Cargo.toml +++ b/common/helpers/Cargo.toml @@ -13,12 +13,12 @@ version = "0.0.0" targets = ['x86_64-unknown-linux-gnu'] [dependencies] -apache-avro = { version = "0.14.0", features = ["snappy"] } -thiserror = "1.0.40" -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +apache-avro = { workspace = true, features = ["snappy"] } +thiserror = { workspace = true } +jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-io = { workspace = true } common-primitives = { path = "../primitives", default-features = false } [features] diff --git a/common/primitives/Cargo.toml b/common/primitives/Cargo.toml index 522e97762e..e95e00300a 100644 --- a/common/primitives/Cargo.toml +++ b/common/primitives/Cargo.toml @@ -13,31 +13,31 @@ version = "0.0.0" targets = ['x86_64-unknown-linux-gnu'] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive", ] } -frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-system = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -impl-serde = { version = "0.4.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ +frame-support = { workspace = true } +frame-system = { workspace = true } +impl-serde = { workspace = true } +scale-info = { workspace = true, features = [ "derive", ] } -serde = { version = "1.0", default-features = false, features = ["derive"] } -serde_json = { version = "1.0.107", default-features = false, features = [ +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true, features = [ "alloc", ] } -enumflags2 = "0.7.7" -smallvec = "1.11.0" -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = 
"release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +enumflags2 = { workspace = true } +smallvec = { workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ['std'] runtime-benchmarks = [] std = [ - 'codec/std', + 'parity-scale-codec/std', 'frame-support/std', 'frame-system/std', 'sp-std/std', diff --git a/common/primitives/src/handles.rs b/common/primitives/src/handles.rs index d7af1d4bab..b20aa011c7 100644 --- a/common/primitives/src/handles.rs +++ b/common/primitives/src/handles.rs @@ -1,8 +1,8 @@ use crate::msa::MessageSourceId; #[cfg(feature = "std")] use crate::utils::*; -use codec::{Decode, Encode}; use frame_support::BoundedVec; +use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_core::ConstU32; diff --git a/common/primitives/src/macros.rs b/common/primitives/src/macros.rs index ed7514f35f..529cf5198c 100644 --- a/common/primitives/src/macros.rs +++ b/common/primitives/src/macros.rs @@ -50,9 +50,9 @@ macro_rules! impl_codec_bitflags { } impl EncodeLike for $wrapper {} impl Decode for $wrapper { - fn decode( + fn decode( input: &mut I, - ) -> sp_std::result::Result { + ) -> sp_std::result::Result { let field = <$size>::decode(input)?; Ok(Self(BitFlags::from_bits(field as $size).map_err(|_| "invalid value")?)) } diff --git a/common/primitives/src/messages.rs b/common/primitives/src/messages.rs index a6766eeac1..263809d3d0 100644 --- a/common/primitives/src/messages.rs +++ b/common/primitives/src/messages.rs @@ -1,7 +1,7 @@ #[cfg(feature = "std")] use crate::utils; use crate::{msa::MessageSourceId, node::BlockNumber}; -use codec::{Decode, Encode}; +use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; diff --git a/common/primitives/src/msa.rs b/common/primitives/src/msa.rs index fd7cab77e6..cc03c21522 100644 --- a/common/primitives/src/msa.rs +++ b/common/primitives/src/msa.rs @@ -1,5 +1,5 @@ -use codec::{Decode, Encode, EncodeLike, Error, MaxEncodedLen}; use frame_support::{dispatch::DispatchResult, traits::Get, BoundedBTreeMap, BoundedVec}; +use parity_scale_codec::{Decode, Encode, EncodeLike, Error, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -30,7 +30,9 @@ impl Encode for DelegatorId { } impl Decode for DelegatorId { - fn decode(input: &mut I) -> Result { + fn decode( + input: &mut I, + ) -> Result { match ::decode(input) { Ok(x) => Ok(DelegatorId(x)), _ => Err(Error::from("Could not decode DelegatorId")), @@ -138,7 +140,9 @@ impl Encode for ProviderId { } impl Decode for ProviderId { - fn decode(input: &mut I) -> Result { + fn decode( + input: &mut I, + ) -> Result { match ::decode(input) { Ok(x) => Ok(ProviderId(x)), _ => Err(Error::from("Could not decode ProviderId")), diff --git a/common/primitives/src/parquet/column_compression_codec.rs b/common/primitives/src/parquet/column_compression_codec.rs index 60940a7d67..05fb7a7e73 100644 --- a/common/primitives/src/parquet/column_compression_codec.rs +++ 
b/common/primitives/src/parquet/column_compression_codec.rs @@ -1,4 +1,4 @@ -use codec::{Decode, Encode, MaxEncodedLen}; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_std::prelude::*; diff --git a/common/primitives/src/rpc.rs b/common/primitives/src/rpc.rs index bec91bb0e8..096ad2fd73 100644 --- a/common/primitives/src/rpc.rs +++ b/common/primitives/src/rpc.rs @@ -3,8 +3,8 @@ use crate::utils::as_hex; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use codec::{Codec, Decode, Encode, EncodeLike}; use frame_system::{EventRecord, Phase}; +use parity_scale_codec::{Codec, Decode, Encode, EncodeLike}; use scale_info::TypeInfo; use sp_std::{fmt::Debug, vec::Vec}; diff --git a/common/primitives/src/schema.rs b/common/primitives/src/schema.rs index 7af0022bfd..6d765f5cb1 100644 --- a/common/primitives/src/schema.rs +++ b/common/primitives/src/schema.rs @@ -1,8 +1,8 @@ use crate::impl_codec_bitflags; #[cfg(feature = "std")] use crate::utils; -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use enumflags2::{bitflags, BitFlags}; +use parity_scale_codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; diff --git a/common/primitives/src/stateful_storage.rs b/common/primitives/src/stateful_storage.rs index 067cc87893..9fc399f1bc 100644 --- a/common/primitives/src/stateful_storage.rs +++ b/common/primitives/src/stateful_storage.rs @@ -1,7 +1,7 @@ use crate::msa::{MessageSourceId, SchemaId}; #[cfg(feature = "std")] use crate::utils; -use codec::{Decode, Encode}; +use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; diff --git a/common/primitives/src/utils.rs b/common/primitives/src/utils.rs index 941047d1cc..8eb24ea380 100644 --- a/common/primitives/src/utils.rs +++ b/common/primitives/src/utils.rs @@ -112,7 +112,7 @@ pub fn wrap_binary_data(data: Vec) -> Vec { #[cfg(test)] mod tests { use super::*; - use codec::{Decode, Encode}; + use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/node/Cargo.toml b/node/Cargo.toml index 853d3a354f..53b6f092bc 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -14,7 +14,7 @@ name = "frequency" path = "src/main.rs" [build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-build-script-utils = { workspace = true } [dependencies] # Frequency Runtime diff --git a/node/cli-opt/Cargo.toml b/node/cli-opt/Cargo.toml index 14ff969e04..25d11f76d2 100644 --- a/node/cli-opt/Cargo.toml +++ b/node/cli-opt/Cargo.toml @@ -9,4 +9,4 @@ repository = "https://github.com/LibertyDSNP/frequency/" version = "0.0.0" [dependencies] -clap = { version = "4.2.5", features = ["derive"] } +clap = { workspace = true, features = ["derive"] } diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 4e111f3857..6b5373ee6d 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -9,14 +9,13 @@ repository = "https://github.com/LibertyDSNP/frequency/" version = "0.0.0" [dependencies] -clap = { version = "4.2.5", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } -derive_more = "0.99.17" -futures = { version = "0.3.28", features = ["thread-pool"] } -hex-literal = "0.4.1" -log = "0.4.17" 
-serde_json = "1.0.107" -serde = { version = "1.0", features = ["derive"] } +clap = { workspace = true, features = ["derive"] } +derive_more = { workspace = true } +futures = { workspace = true, features = ["thread-pool"] } +hex-literal = { workspace = true } +log = { workspace = true } +serde_json = { workspace = true } +serde = { workspace = true, features = ["derive"] } # Frequency Runtime common-primitives = { default-features = false, path = "../../common/primitives" } common-runtime = { package = "common-runtime", path = "../../runtime/common", default-features = false } @@ -28,40 +27,40 @@ pallet-handles = { package = "pallet-handles", path = "../../pallets/handles", d cli-opt = { default-features = false, path = "../cli-opt" } # Substrate -frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-cli = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-service = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -sc-sysinfo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-executor = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -try-runtime-cli = { git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } +frame-benchmarking-cli = { workspace = true, optional = true } +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-balances = { workspace = true } 
+pallet-transaction-payment = { workspace = true } +sc-cli = { workspace = true, optional = true } +sc-client-api = { workspace = true } +sc-service = { workspace = true, optional = true } +sc-sysinfo = { workspace = true } +sc-telemetry = { workspace = true } +sc-tracing = { workspace = true } +sc-executor = { workspace = true } +sp-io = { workspace = true } +sp-api = { workspace = true } +sp-core = { workspace = true, optional = true } +sp-inherents = { workspace = true } +sp-keyring = { workspace = true } +sp-runtime = { workspace = true } +sp-timestamp = { workspace = true } +try-runtime-cli = { workspace = true, optional = true } # Polkadot -polkadot-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-parachain-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +polkadot-cli = { workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-service = { workspace = true } # Cumulus -cumulus-client-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-primitives-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-cli = { workspace = true } +cumulus-client-service = { workspace = true } +cumulus-primitives-parachain-inherent = { workspace =true } +cumulus-primitives-core = { workspace = true } [build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-build-script-utils = { workspace = true } [features] std = ["sp-api/std", "sp-io/std"] diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index fa93f523f6..0bf540b7c3 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -9,82 +9,88 @@ repository = "https://github.com/LibertyDSNP/frequency/" version = "0.0.0" [dependencies] -clap = { version = "4.2.5", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } -derive_more = "0.99.17" -futures = "0.3.25" -hex = "0.4.3" -hex-literal = "0.4.1" -jsonrpsee = { version = "0.16.2", features = ["server"] } -log = "0.4.17" -serde = { version = "1.0", features = ["derive"] } -tokio = { version = "1.25.0", features = ["macros", "time", "parking_lot"] } +clap = { workspace = true, features = ["derive"] } +parity-scale-codec = { workspace = true } + +derive_more = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +hex-literal = { workspace = true } + +jsonrpsee = { workspace = true, features = ["server"] } +log = { workspace = true } +serde = { workspace = true, features = ["derive"] } +tokio = { workspace = true, features = ["macros", "time", "parking_lot"] } # Frequency Runtime frequency-runtime = { path = "../../runtime/frequency", optional = true } common-runtime = { path = "../../runtime/common" } cli-opt = { default-features = false, path = "../cli-opt" } # Substrate 
-frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-client-db = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-consensus-manual-seal = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-executor = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-network-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-network-sync = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-sysinfo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-consensus = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk", 
branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-wasm-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-benchmarking = { workspace = true } +frame-benchmarking-cli = { workspace = true } +frame-system = { workspace = true } +pallet-transaction-payment-rpc = { workspace = true } +sc-basic-authorship = { workspace = true } +sc-chain-spec = { workspace = true } +sc-executor = { workspace = true } +sc-cli = { workspace = true } +sc-keystore = { workspace = true } + +sc-service = { workspace = true } +sc-client-api = { workspace = true } + +sc-client-db = { workspace = true } +sc-consensus = { workspace = true } +sc-consensus-manual-seal = { workspace = true } +sc-network = { workspace = true } +sc-network-common = { workspace = true } +sc-network-sync = { workspace = true } +sc-offchain = { workspace = true } + +sc-sysinfo = { workspace = true } + +sc-rpc = { workspace = true } +sc-telemetry = { workspace = true } +sc-tracing = { workspace = true } +sc-transaction-pool = { workspace = true } +sc-transaction-pool-api = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-blockchain = { workspace = true } +sp-consensus = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-keystore = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-timestamp = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-wasm-interface = { workspace = true } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -try-runtime-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-frame-rpc-system = { workspace = true } +substrate-prometheus-endpoint = { workspace = true } +try-runtime-cli = { workspace = true } # Polkadot -polkadot-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -polkadot-service = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +polkadot-cli = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-service = { workspace = true } # Cumulus -cumulus-client-cli = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-consensus-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-network = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-service = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-primitives-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-relay-chain-inprocess-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-relay-chain-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-relay-chain-rpc-interface = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-relay-chain-minimal-node = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-collator = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -cumulus-client-consensus-proposer = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +cumulus-client-cli = { workspace = true } +cumulus-client-collator = { workspace = true } +cumulus-client-consensus-aura = { workspace = true } +cumulus-client-consensus-common = { workspace = true } +cumulus-client-consensus-proposer = { workspace = true } +cumulus-client-network = { workspace = true } +cumulus-client-service = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-parachain-inherent = { workspace = true } +cumulus-relay-chain-inprocess-interface = { workspace = true } +cumulus-relay-chain-interface = { workspace = true } +cumulus-relay-chain-minimal-node = { workspace = true } +cumulus-relay-chain-rpc-interface = { workspace = true } # Frequency common-helpers = { default-features = false, path = "../../common/helpers" } common-primitives = { default-features = false, path = "../../common/primitives" } @@ -103,12 +109,12 @@ pallet-frequency-tx-payment-runtime-api = { path = "../../pallets/frequency-tx-p system-runtime-api = { path = "../../runtime/system-runtime-api" } [build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +substrate-build-script-utils = { workspace = true } [dev-dependencies] -substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -scale-info = { version = "2.10.0", default-features = false, features = [ +substrate-test-runtime-client = { workspace = true } +sc-client-api = { workspace = true } +scale-info = { workspace = true, features = [ "derive", ] } diff --git a/node/service/src/block_sealing.rs b/node/service/src/block_sealing.rs index 7205226a3a..c2d164470e 100644 --- a/node/service/src/block_sealing.rs +++ b/node/service/src/block_sealing.rs @@ -236,7 +236,7 @@ async fn run_seal_command( SC: SelectChain + 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, + P: parity_scale_codec::Encode + Send + Sync + 'static, { while let Some(command) = commands_stream.next().await { match command { diff --git a/node/service/src/rpc/tests/mod.rs b/node/service/src/rpc/tests/mod.rs index b38b9ada52..6728f1affe 100644 --- a/node/service/src/rpc/tests/mod.rs +++ b/node/service/src/rpc/tests/mod.rs @@ -3,9 +3,9 @@ 
mod rpc_mock; use rpc_mock::*; use crate::rpc::frequency_rpc::{FrequencyRpcApiServer, FrequencyRpcHandler}; -use codec::{Decode, Encode}; use common_primitives::rpc::RpcEvent; use frame_system::{EventRecord, Phase}; +use parity_scale_codec::{Decode, Encode}; use sp_core::H256; use sp_runtime::scale_info::TypeInfo; use std::sync::Arc; diff --git a/pallets/capacity/Cargo.toml b/pallets/capacity/Cargo.toml index b8f7f84491..8550f1de76 100644 --- a/pallets/capacity/Cargo.toml +++ b/pallets/capacity/Cargo.toml @@ -13,26 +13,26 @@ version = "0.0.0" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = {package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive", ]} -common-primitives = {default-features = false, path = "../../common/primitives"} -frame-benchmarking = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -frame-support = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-system = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -scale-info = {version = "2.10.0", default-features = false, features = [ +common-primitives = { default-features = false, path = "../../common/primitives" } +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +scale-info = { workspace = true, features = [ "derive", ]} -log = { version = "0.4.17", default-features = false } -sp-core = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-io = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-runtime = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-std = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +log = { workspace = true, default-features = false } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -common-runtime = {path = '../../runtime/common', default-features = false} -pallet-balances = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -pallet-msa = {path = '../msa'} +common-runtime = { path = '../../runtime/common', default-features = false } +pallet-balances = { workspace = true } +pallet-msa = { path = '../msa' } [features] default = ["std"] @@ -42,7 +42,7 @@ runtime-benchmarks = [ "pallet-msa/runtime-benchmarks", ] std = [ - "codec/std", + "parity-scale-codec/std", "scale-info/std", "frame-support/std", "frame-system/std", diff --git a/pallets/capacity/src/benchmarking.rs b/pallets/capacity/src/benchmarking.rs index df1a0e83d3..3f6fe24099 100644 --- a/pallets/capacity/src/benchmarking.rs +++ b/pallets/capacity/src/benchmarking.rs @@ -1,10 +1,10 @@ use super::*; use crate::Pallet as Capacity; -use codec::alloc::vec::Vec; use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{assert_ok, traits::Currency}; use frame_system::RawOrigin; +use parity_scale_codec::alloc::vec::Vec; const SEED: u32 = 0; diff --git a/pallets/capacity/src/types.rs 
b/pallets/capacity/src/types.rs index 9b76809180..72f5168cb8 100644 --- a/pallets/capacity/src/types.rs +++ b/pallets/capacity/src/types.rs @@ -1,8 +1,8 @@ //! Types for the Capacity Pallet use super::*; -use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use log::warn; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ traits::{CheckedAdd, CheckedSub, Saturating, Zero}, diff --git a/pallets/frequency-tx-payment/Cargo.toml b/pallets/frequency-tx-payment/Cargo.toml index 77cf9d86e9..0b75767ed4 100644 --- a/pallets/frequency-tx-payment/Cargo.toml +++ b/pallets/frequency-tx-payment/Cargo.toml @@ -13,31 +13,31 @@ version = "0.0.0" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = {package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive", ]} -frame-benchmarking = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -frame-support = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-system = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } pallet-capacity = {default-features = false, path = "../capacity"} pallet-msa = {default-features = false, path = "../msa"} -pallet-transaction-payment = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -scale-info = {version = "2.10.0", default-features = false, features = [ +pallet-transaction-payment = { workspace = true } +scale-info = { workspace = true, features = [ "derive", ]} -sp-core = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-io = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-runtime = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-std = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -log = "0.4.17" +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +log = { workspace = true } # Frequency related dependencies common-primitives = {default-features = false, path = "../../common/primitives"} [dev-dependencies] common-runtime = { path = "../../runtime/common", default-features = false } -pallet-balances = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -pallet-utility = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +pallet-balances = { workspace = true } +pallet-utility = { workspace = true } # Frequency related dev dependencies pallet-msa = {path = '../msa'} @@ -52,7 +52,7 @@ runtime-benchmarks = [ "pallet-capacity/runtime-benchmarks", ] std = [ - "codec/std", + "parity-scale-codec/std", "scale-info/std", "frame-support/std", "frame-system/std", diff --git a/pallets/frequency-tx-payment/src/lib.rs b/pallets/frequency-tx-payment/src/lib.rs index 
d0ca58f3f9..14b90cd066 100644 --- a/pallets/frequency-tx-payment/src/lib.rs +++ b/pallets/frequency-tx-payment/src/lib.rs @@ -23,7 +23,6 @@ #![allow(clippy::expect_used)] #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, GetDispatchInfo, PostDispatchInfo}, pallet_prelude::*, @@ -33,6 +32,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use pallet_transaction_payment::{FeeDetails, InclusionFee, OnChargeTransaction}; +use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, diff --git a/pallets/frequency-tx-payment/src/rpc/Cargo.toml b/pallets/frequency-tx-payment/src/rpc/Cargo.toml index ad98321b47..89c0ba8912 100644 --- a/pallets/frequency-tx-payment/src/rpc/Cargo.toml +++ b/pallets/frequency-tx-payment/src/rpc/Cargo.toml @@ -10,31 +10,31 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -rayon = "1.5.3" +parity-scale-codec = { workspace = true } +jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } +rayon = { workspace = true } # Frequency crates pallet-frequency-tx-payment-runtime-api = { default-features = false, path = "../runtime-api" } common-primitives = { default-features = false, path = "../../../../common/primitives" } common-helpers = { default-features = false, path = "../../../../common/helpers" } # Substrate crates -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-core = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } +sp-blockchain = { workspace = true } +sp-runtime = { workspace = true } +sp-rpc = { workspace = true } +sp-weights = { workspace = true } [dev-dependencies] -tokio = { version = "1.25.0", features = ["macros", "time", "parking_lot"] } -substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +tokio = { workspace = true, features = ["macros", "time", "parking_lot"] } +substrate-test-runtime-client = { workspace = true } +sc-client-api = { workspace = true } [features] default = ["std"] std = [ - "codec/std", + "parity-scale-codec/std", "sp-api/std", "sp-runtime/std", "pallet-frequency-tx-payment-runtime-api/std", diff --git a/pallets/frequency-tx-payment/src/rpc/src/lib.rs 
index cef0971bc0..e2740a2fb3 100644
--- a/pallets/frequency-tx-payment/src/rpc/src/lib.rs
+++ b/pallets/frequency-tx-payment/src/rpc/src/lib.rs
@@ -19,7 +19,6 @@
 use std::{convert::TryInto, sync::Arc};
-use codec::{Codec, Decode};
 use jsonrpsee::{
 	core::{async_trait, Error as JsonRpseeError, RpcResult},
 	proc_macros::rpc,
@@ -29,6 +28,7 @@ use jsonrpsee::{
 	},
 };
 use pallet_frequency_tx_payment_runtime_api::{FeeDetails, InclusionFee};
+use parity_scale_codec::{Codec, Decode};
 use sp_api::ProvideRuntimeApi;
 use sp_blockchain::HeaderBackend;
 use sp_core::Bytes;
diff --git a/pallets/frequency-tx-payment/src/runtime-api/Cargo.toml b/pallets/frequency-tx-payment/src/runtime-api/Cargo.toml
index ac519bb584..0b83377650 100644
--- a/pallets/frequency-tx-payment/src/runtime-api/Cargo.toml
+++ b/pallets/frequency-tx-payment/src/runtime-api/Cargo.toml
@@ -10,21 +10,21 @@ repository = "https://github.com/LibertyDSNP/frequency/"
 edition = "2021"
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+parity-scale-codec = { workspace = true, features = [
 "derive"
 ] }
 # Substrate
-sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
+sp-api = { workspace = true }
+sp-std = { workspace = true }
+frame-support = { workspace = true }
 # Frequency related dependencies
 common-primitives = { default-features = false, path = "../../../../common/primitives" }
-pallet-transaction-payment = {default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }
+pallet-transaction-payment = { workspace = true }
 [features]
 default = ['std']
 std = [
- "codec/std",
+ "parity-scale-codec/std",
 "sp-api/std",
 "sp-std/std",
 "frame-support/std",
diff --git a/pallets/frequency-tx-payment/src/runtime-api/src/lib.rs b/pallets/frequency-tx-payment/src/runtime-api/src/lib.rs
index a22bfa12c6..c287e105e2 100644
--- a/pallets/frequency-tx-payment/src/runtime-api/src/lib.rs
+++ b/pallets/frequency-tx-payment/src/runtime-api/src/lib.rs
@@ -16,8 +16,8 @@
 //! - An interface between the runtime and Custom RPCs.
 //! - Runtime interfaces for end users beyond just State Queries
-use codec::Codec;
 use frame_support::sp_runtime;
+use parity_scale_codec::Codec;
 use sp_runtime::traits::MaybeDisplay;
 pub use pallet_transaction_payment::{FeeDetails, InclusionFee};
diff --git a/pallets/handles/Cargo.toml b/pallets/handles/Cargo.toml
index c33ec33885..1ed911016b 100644
--- a/pallets/handles/Cargo.toml
+++ b/pallets/handles/Cargo.toml
@@ -13,26 +13,26 @@ repository = "https://github.com/libertyDSNP/frequency/"
 targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive",] }
-log = { version = "0.4.17", default-features = false }
-scale-info = { version = "2.10.0", default-features = false, features = ["derive",] }
-numtoa = "0.2.4"
+parity-scale-codec = { workspace = true, features = ["derive",] }
+log = { workspace = true }
+scale-info = { workspace = true, features = ["derive",] }
+numtoa = { workspace = true }
 # Substrate
-frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, optional = true, branch = "release-polkadot-v1.1.0" }
-frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-sp-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-sp-io = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
+frame-benchmarking = { workspace = true, optional = true }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+sp-core = { workspace = true }
+sp-io = { workspace = true }
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
 # Frequency related dependencies
 common-primitives = { default-features = false, path = "../../common/primitives" }
 handles-utils = { default-features = false, path = "src/handles-utils"}
 [dev-dependencies]
-env_logger = "0.10.0"
-pretty_assertions = "1.3.0"
-serde = { version = "1.0", features = ["derive"] }
-sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
+env_logger = { workspace = true }
+pretty_assertions = { workspace = true }
+serde = { workspace = true, features = ["derive"] }
+sp-keystore = { workspace = true }
 [features]
 default = ['std']
@@ -43,7 +43,7 @@ runtime-benchmarks = [
 "common-primitives/runtime-benchmarks"
 ]
 std = [
- 'codec/std',
+ 'parity-scale-codec/std',
 'scale-info/std',
 'sp-std/std',
 'sp-core/std',
diff --git a/pallets/handles/src/handles-utils/Cargo.toml b/pallets/handles/src/handles-utils/Cargo.toml
index b523ca916d..4fa1828e14 100644
--- a/pallets/handles/src/handles-utils/Cargo.toml
+++ b/pallets/handles/src/handles-utils/Cargo.toml
@@ -11,10 +11,10 @@ publish = false
 repository = "https://github.com/libertyDSNP/frequency/"
 [dependencies]
-phf = { version = "0.11", default-features = false, features = ["macros"] }
-oorandom = "11.1.3"
-unicode-normalization = { version = "0.1.22", default-features = false
} -twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"] } +phf = { workspace = true, features = ["macros"] } +oorandom = { workspace = true } +unicode-normalization = { workspace = true } +twox-hash = { workspace = true, features = ["digest_0_10"] } [features] default = ["std"] diff --git a/pallets/handles/src/handles_signed_extension.rs b/pallets/handles/src/handles_signed_extension.rs index 5faec5e27c..836435880e 100644 --- a/pallets/handles/src/handles_signed_extension.rs +++ b/pallets/handles/src/handles_signed_extension.rs @@ -1,12 +1,12 @@ //! Substrate Signed Extension for validating requests to the handles pallet use crate::{Call, Config, Error, MSAIdToDisplayName}; -use codec::{Decode, Encode}; use common_primitives::msa::MsaValidator; use core::marker::PhantomData; use frame_support::{ dispatch::DispatchInfo, ensure, pallet_prelude::ValidTransaction, traits::IsSubType, unsigned::UnknownTransaction, }; +use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, SignedExtension}, diff --git a/pallets/handles/src/rpc/Cargo.toml b/pallets/handles/src/rpc/Cargo.toml index 998452d463..ac22b23c64 100644 --- a/pallets/handles/src/rpc/Cargo.toml +++ b/pallets/handles/src/rpc/Cargo.toml @@ -10,29 +10,27 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -rayon = "1.5.3" +jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } +rayon = { workspace = true } # Frequency crates pallet-handles-runtime-api = { default-features = false, path = "../runtime-api" } common-primitives = { default-features = false, path = "../../../../common/primitives" } common-helpers = { default-features = false, path = "../../../../common/helpers" } # Substrate crates -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-api = { workspace = true } +sp-std = { workspace = true } +sp-blockchain = { workspace = true } +sp-runtime = { workspace = true } +frame-support = { workspace = true } [dev-dependencies] -tokio = { version = "1.25.0", features = ["macros", "time", "parking_lot"] } -substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +tokio = { workspace = true, features = ["macros", "time", "parking_lot"] } +substrate-test-runtime-client = { workspace = true } +sc-client-api = { workspace = true } [features] default = ["std"] std = [ - 'codec/std', 'sp-std/std', 'sp-api/std', 'sp-runtime/std', diff --git a/pallets/handles/src/runtime-api/Cargo.toml b/pallets/handles/src/runtime-api/Cargo.toml index 
c691e384b7..82005eaac5 100644 --- a/pallets/handles/src/runtime-api/Cargo.toml +++ b/pallets/handles/src/runtime-api/Cargo.toml @@ -10,19 +10,15 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ - "derive" -] } # Substrate -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-std = { workspace = true } +sp-api = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = "../../../../common/primitives" } [features] default = ["std"] std = [ - 'codec/std', 'sp-std/std', 'sp-api/std', 'common-primitives/std', diff --git a/pallets/handles/src/tests/handle_change_tests.rs b/pallets/handles/src/tests/handle_change_tests.rs index 9af724f893..6694d6295e 100644 --- a/pallets/handles/src/tests/handle_change_tests.rs +++ b/pallets/handles/src/tests/handle_change_tests.rs @@ -1,7 +1,7 @@ use crate::{tests::mock::*, Error, Event}; -use codec::Decode; use common_primitives::msa::MessageSourceId; use frame_support::{assert_err, assert_ok}; +use parity_scale_codec::Decode; use sp_core::{sr25519, Encode, Pair}; #[test] diff --git a/pallets/handles/src/tests/handle_creation_tests.rs b/pallets/handles/src/tests/handle_creation_tests.rs index 7d02bcd943..d36887cec8 100644 --- a/pallets/handles/src/tests/handle_creation_tests.rs +++ b/pallets/handles/src/tests/handle_creation_tests.rs @@ -1,7 +1,7 @@ use crate::{tests::mock::*, Error, Event}; -use codec::Decode; use common_primitives::{handles::HANDLE_BYTES_MAX, msa::MessageSourceId}; use frame_support::{assert_err, assert_noop, assert_ok, dispatch::DispatchResult}; +use parity_scale_codec::Decode; use sp_core::{sr25519, Encode, Pair}; use sp_std::collections::btree_set::BTreeSet; diff --git a/pallets/handles/src/tests/handle_retirements_tests.rs b/pallets/handles/src/tests/handle_retirements_tests.rs index 0c60f80a84..1aedcd2719 100644 --- a/pallets/handles/src/tests/handle_retirements_tests.rs +++ b/pallets/handles/src/tests/handle_retirements_tests.rs @@ -1,11 +1,11 @@ use crate::{handles_signed_extension::HandlesSignedExtension, tests::mock::*, Error, Event}; -use codec::Decode; use common_primitives::{handles::*, msa::MessageSourceId}; use frame_support::{ assert_noop, assert_ok, dispatch::{DispatchInfo, GetDispatchInfo, Pays}, }; use numtoa::*; +use parity_scale_codec::Decode; use sp_core::{sr25519, Encode, Pair}; use sp_runtime::traits::SignedExtension; diff --git a/pallets/handles/src/tests/handles_replay_attack_test.rs b/pallets/handles/src/tests/handles_replay_attack_test.rs index 12acc9aafc..41103cf6bb 100644 --- a/pallets/handles/src/tests/handles_replay_attack_test.rs +++ b/pallets/handles/src/tests/handles_replay_attack_test.rs @@ -1,8 +1,8 @@ use crate::{tests::mock::*, Error}; -use codec::Decode; use common_primitives::{handles::*, msa::MessageSourceId}; use frame_support::{assert_noop, assert_ok}; use numtoa::*; +use parity_scale_codec::Decode; use sp_core::{sr25519, Encode, Pair}; #[test] diff --git a/pallets/handles/src/tests/mock.rs b/pallets/handles/src/tests/mock.rs index e6c6686a20..6dda9aae66 100644 --- a/pallets/handles/src/tests/mock.rs +++ b/pallets/handles/src/tests/mock.rs @@ -1,6 +1,6 @@ use crate as pallet_handles; -use codec::Decode; pub 
use pallet_handles::Call as HandlesCall; +use parity_scale_codec::Decode; use common_primitives::{ handles::*, diff --git a/pallets/messages/Cargo.toml b/pallets/messages/Cargo.toml index 1ac21e9306..9d159065a3 100644 --- a/pallets/messages/Cargo.toml +++ b/pallets/messages/Cargo.toml @@ -13,21 +13,21 @@ version = "0.0.0" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive", ] } -log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ +log = { workspace = true } +scale-info = { workspace = true, features = [ "derive", ] } # Substrate -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, optional = true, branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = "../../common/primitives" } # Pinning a specific commit to fix an unwrap-panic issue: REVIEW: remove when cid > 0.10.1 @@ -37,9 +37,9 @@ multibase = { version ="0.9", default-features = false } [dev-dependencies] common-runtime = { path = '../../runtime/common', default-features = false } # Testing dependencies -rand = "0.8.5" -pretty_assertions = "1.3.0" -serde = { version = "1.0", features = ["derive"] } +rand = { workspace = true } +pretty_assertions = { workspace = true } +serde = { workspace = true, features = ["derive"] } serde_json = "1.0" [features] @@ -52,7 +52,7 @@ runtime-benchmarks = [ "common-runtime/runtime-benchmarks", ] std = [ - 'codec/std', + 'parity-scale-codec/std', 'scale-info/std', 'sp-std/std', 'sp-core/std', diff --git a/pallets/messages/src/lib.rs b/pallets/messages/src/lib.rs index fafbb5d756..8a620a1952 100644 --- a/pallets/messages/src/lib.rs +++ b/pallets/messages/src/lib.rs @@ -59,7 +59,6 @@ use frame_support::{ensure, pallet_prelude::Weight, traits::Get, BoundedVec}; use sp_runtime::DispatchError; use sp_std::{convert::TryInto, fmt::Debug, prelude::*}; -use codec::Encode; use common_primitives::{ messages::*, msa::{ @@ -68,6 +67,7 @@ use common_primitives::{ schema::*, }; use frame_support::dispatch::DispatchResult; +use parity_scale_codec::Encode; #[cfg(feature = "runtime-benchmarks")] use common_primitives::benchmarks::{MsaBenchmarkHelper, SchemaBenchmarkHelper}; diff --git a/pallets/messages/src/rpc/Cargo.toml 
b/pallets/messages/src/rpc/Cargo.toml index 3f5e8921b2..d2ffe64ca8 100644 --- a/pallets/messages/src/rpc/Cargo.toml +++ b/pallets/messages/src/rpc/Cargo.toml @@ -10,27 +10,25 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } # Frequency crates pallet-messages-runtime-api = { default-features = false, path = "../runtime-api" } common-primitives = { default-features = false, path = "../../../../common/primitives" } common-helpers = { default-features = false, path = "../../../../common/helpers" } # Substrate crates -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +sp-api = { workspace = true } +sp-blockchain = { workspace = true } +sp-runtime = { workspace = true } +frame-support = { workspace = true } [dev-dependencies] -tokio = { version = "1.25.0", features = ["macros", "time", "parking_lot"] } -substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +tokio = { workspace = true, features = ["macros", "time", "parking_lot"] } +substrate-test-runtime-client = { workspace = true } +sc-client-api = { workspace = true } [features] default = ["std"] std = [ - 'codec/std', "sp-api/std", "sp-runtime/std", 'frame-support/std', diff --git a/pallets/messages/src/runtime-api/Cargo.toml b/pallets/messages/src/runtime-api/Cargo.toml index d159e581c8..12bc8a1209 100644 --- a/pallets/messages/src/runtime-api/Cargo.toml +++ b/pallets/messages/src/runtime-api/Cargo.toml @@ -10,15 +10,12 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ - "derive" -] } # Substrate -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-api = { workspace = true } +frame-support = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = "../../../../common/primitives" } [features] default = ["std"] -std = ["codec/std", "sp-api/std", "frame-support/std", 'common-primitives/std'] +std = ["sp-api/std", "frame-support/std", 'common-primitives/std'] diff --git a/pallets/messages/src/tests/mock.rs b/pallets/messages/src/tests/mock.rs index fe55e06983..7cbdd256dc 100644 --- a/pallets/messages/src/tests/mock.rs +++ b/pallets/messages/src/tests/mock.rs @@ -7,13 +7,13 @@ use common_primitives::{ schema::*, }; -use codec::{Encode, MaxEncodedLen}; use frame_support::{ dispatch::DispatchResult, parameter_types, 
traits::{ConstU16, ConstU32, OnFinalize, OnInitialize}, }; use frame_system as system; +use parity_scale_codec::{Encode, MaxEncodedLen}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, diff --git a/pallets/messages/src/tests/other_tests.rs b/pallets/messages/src/tests/other_tests.rs index 8ccc31dde2..c421d2fbe6 100644 --- a/pallets/messages/src/tests/other_tests.rs +++ b/pallets/messages/src/tests/other_tests.rs @@ -3,7 +3,6 @@ use crate::{ tests::mock::*, BlockMessageIndex, Error, Event as MessageEvent, Message, MessagesV2, }; -use codec::Encode; use common_primitives::{messages::MessageResponse, schema::*}; use frame_support::{ assert_err, assert_noop, assert_ok, @@ -13,6 +12,7 @@ use frame_support::{ }; use frame_system::{EventRecord, Phase}; use multibase::Base; +use parity_scale_codec::Encode; #[allow(unused_imports)] use pretty_assertions::{assert_eq, assert_ne, assert_str_eq}; use rand::Rng; diff --git a/pallets/messages/src/types.rs b/pallets/messages/src/types.rs index a63a8139ff..6a5d9b4676 100644 --- a/pallets/messages/src/types.rs +++ b/pallets/messages/src/types.rs @@ -1,9 +1,9 @@ -use codec::{Decode, Encode, MaxEncodedLen}; use common_primitives::{ messages::MessageResponse, msa::MessageSourceId, node::BlockNumber, schema::PayloadLocation, }; use frame_support::{traits::Get, BoundedVec}; use multibase::Base; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_std::{fmt::Debug, prelude::*}; diff --git a/pallets/msa/Cargo.toml b/pallets/msa/Cargo.toml index b67adabcd8..9d5008d516 100644 --- a/pallets/msa/Cargo.toml +++ b/pallets/msa/Cargo.toml @@ -13,21 +13,20 @@ version = "0.0.0" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4.17" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +log = { workspace = true } +parity-scale-codec = { workspace = true, features = [ "derive", -] } -frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-system = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -scale-info = { version = "2.10.0", default-features = false, features = [ - "derive", -] } -sp-core = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-io = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-runtime = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-weights = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +]} +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } + +scale-info = { workspace = true, features = ["derive"] } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-weights = { workspace = true } # Frequency related dependencies common-primitives = { default-features = 
false, path = "../../common/primitives" } @@ -35,10 +34,10 @@ common-primitives = { default-features = false, path = "../../common/primitives" common-runtime = { path = "../../runtime/common", default-features = false } pallet-schemas = { path = "../schemas", default-features = false } pallet-handles = { path = "../handles", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-keystore = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-keyring = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -pretty_assertions = { version = "1.3.0"} +pallet-collective = { workspace = true } +sp-keystore = { workspace = true } +sp-keyring = { workspace = true } +pretty_assertions = { workspace = true } [features] default = ["std"] @@ -50,7 +49,7 @@ runtime-benchmarks = [ "pallet-collective/runtime-benchmarks", ] std = [ - "codec/std", + "parity-scale-codec/std", "scale-info/std", "frame-support/std", "frame-system/std", diff --git a/pallets/msa/src/lib.rs b/pallets/msa/src/lib.rs index 60592ca14f..c8932e2ef5 100644 --- a/pallets/msa/src/lib.rs +++ b/pallets/msa/src/lib.rs @@ -55,13 +55,13 @@ missing_docs )] -use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, ensure, pallet_prelude::*, traits::IsSubType, }; +use parity_scale_codec::{Decode, Encode}; #[cfg(feature = "runtime-benchmarks")] use common_primitives::benchmarks::{MsaBenchmarkHelper, RegisterProviderBenchmarkHelper}; diff --git a/pallets/msa/src/rpc/Cargo.toml b/pallets/msa/src/rpc/Cargo.toml index a40a71c36c..22a9a34b4d 100644 --- a/pallets/msa/src/rpc/Cargo.toml +++ b/pallets/msa/src/rpc/Cargo.toml @@ -10,28 +10,28 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -rayon = "1.5.3" +parity-scale-codec = { workspace = true } +jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } +rayon ={ workspace = true } # Frequency crates pallet-msa-runtime-api = { default-features = false, path = "../runtime-api" } common-primitives = { default-features = false, path = "../../../../common/primitives" } common-helpers = { default-features = false, path = "../../../../common/helpers" } # Substrate crates -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-api = { workspace = true } +sp-std = { workspace = true } +sp-blockchain = { workspace = true } +sp-runtime = { workspace = true } [dev-dependencies] -tokio = { version = "1.25.0", features = ["macros", "time", "parking_lot"] } -substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +tokio = { workspace = true, features = ["macros", "time", "parking_lot"] } +substrate-test-runtime-client = { workspace = true } +sc-client-api = { workspace = true } [features] default = ["std"] std = [ - 'codec/std', + 'parity-scale-codec/std', "sp-api/std", "sp-runtime/std", "pallet-msa-runtime-api/std", diff --git a/pallets/msa/src/rpc/src/lib.rs b/pallets/msa/src/rpc/src/lib.rs index b96e5c3a0e..4af25aeb1d 100644 --- a/pallets/msa/src/rpc/src/lib.rs +++ b/pallets/msa/src/rpc/src/lib.rs @@ -8,13 +8,13 @@ //! Custom APIs for [MSA](../pallet_msa/index.html) -use codec::Codec; use common_helpers::rpc::map_rpc_result; use common_primitives::{ msa::{DelegatorId, ProviderId, SchemaGrant}, node::BlockNumber, schema::SchemaId, }; +use parity_scale_codec::Codec; use jsonrpsee::{ core::{async_trait, RpcResult}, diff --git a/pallets/msa/src/runtime-api/Cargo.toml b/pallets/msa/src/runtime-api/Cargo.toml index d9b9424343..73c6626a3f 100644 --- a/pallets/msa/src/runtime-api/Cargo.toml +++ b/pallets/msa/src/runtime-api/Cargo.toml @@ -10,20 +10,20 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive" ] } # Substrate -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-api = { workspace = true } +sp-std = { workspace = true } +frame-support = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = "../../../../common/primitives" } [features] default = ["std"] std = [ - "codec/std", + "parity-scale-codec/std", "sp-api/std", "sp-std/std", "frame-support/std", diff --git a/pallets/msa/src/runtime-api/src/lib.rs b/pallets/msa/src/runtime-api/src/lib.rs index 23f995f285..5b565bdd4a 100644 --- a/pallets/msa/src/runtime-api/src/lib.rs +++ b/pallets/msa/src/runtime-api/src/lib.rs @@ -17,8 +17,8 @@ //! - An interface between the runtime and Custom RPCs. //! - Runtime interfaces for end users beyond just State Queries -use codec::Codec; use common_primitives::{msa::*, node::BlockNumber}; +use parity_scale_codec::Codec; use sp_std::vec::Vec; // Here we declare the runtime API. 
It is implemented it the `impl` block in diff --git a/pallets/msa/src/tests/mock.rs b/pallets/msa/src/tests/mock.rs index 1aa140e581..ec42b784a1 100644 --- a/pallets/msa/src/tests/mock.rs +++ b/pallets/msa/src/tests/mock.rs @@ -1,5 +1,4 @@ use crate::{self as pallet_msa, types::EMPTY_FUNCTION, AddProvider}; -use codec::MaxEncodedLen; use common_primitives::{ msa::MessageSourceId, node::BlockNumber, schema::SchemaId, utils::wrap_binary_data, }; @@ -11,6 +10,7 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_collective; +use parity_scale_codec::MaxEncodedLen; use sp_core::{sr25519, sr25519::Public, Encode, Pair, H256}; use sp_runtime::{ traits::{BlakeTwo256, ConvertInto, IdentityLookup}, diff --git a/pallets/msa/src/types.rs b/pallets/msa/src/types.rs index 86e3506b85..70fa7fbee4 100644 --- a/pallets/msa/src/types.rs +++ b/pallets/msa/src/types.rs @@ -2,7 +2,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use super::*; -use codec::{Decode, Encode}; +use parity_scale_codec::{Decode, Encode}; use core::fmt::Debug; diff --git a/pallets/schemas/Cargo.toml b/pallets/schemas/Cargo.toml index 7474e52357..81741a42d9 100644 --- a/pallets/schemas/Cargo.toml +++ b/pallets/schemas/Cargo.toml @@ -14,34 +14,34 @@ targets = ["x86_64-unknown-linux-gnu"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive", ] } -log = { version = "0.4.17", default-features = false } -frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } -frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-system = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -numtoa = { default-features = false, version = '0.2.4', optional = true } -scale-info = { version = "2.10.0", default-features = false, features = [ +log = { workspace = true } +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +numtoa = { workspace = true, optional = true } +scale-info = { workspace = true, features = [ "derive", ] } -serde_json = { version = "1.0.86", default-features = false, features = [ +serde_json = { workspace = true, features = [ "alloc", ] } -smallvec = "1.11.0" -sp-core = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-io = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-runtime = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-weights = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +smallvec = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +sp-weights = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = 
"../../common/primitives" } [dev-dependencies] common-runtime = { path = '../../runtime/common', default-features = false } -serial_test = { default-features = false, version = '0.9.0' } -sp-keyring = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +serial_test = { workspace = true } +sp-keyring = { workspace = true } +pallet-collective = { workspace = true } [features] default = ["std"] @@ -52,7 +52,7 @@ runtime-benchmarks = [ "numtoa", ] std = [ - "codec/std", + "parity-scale-codec/std", "scale-info/std", "frame-support/std", "frame-system/std", diff --git a/pallets/schemas/src/rpc/Cargo.toml b/pallets/schemas/src/rpc/Cargo.toml index 13215da3a9..49e2c38ffd 100644 --- a/pallets/schemas/src/rpc/Cargo.toml +++ b/pallets/schemas/src/rpc/Cargo.toml @@ -10,31 +10,29 @@ repository = "https://github.com/libertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } # Frequency crates pallet-schemas-runtime-api = { path = "../runtime-api", default-features = false } common-primitives = { path = '../../../../common/primitives', default-features = false } common-helpers = { path = '../../../../common/helpers', default-features = false } # Substrate crates -frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-core = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-api = { workspace = true } +sp-rpc = { workspace = true } +sp-runtime = { workspace = true } +sp-blockchain = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -tokio = { version = "1.25.0", features = ["macros", "time", "parking_lot"] } -substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +tokio = { workspace = true, features = ["macros", "time", "parking_lot"] } +substrate-test-runtime-client = { workspace = true } +sc-client-api = { workspace = true } [features] default = ["std"] std = [ "sp-std/std", - "codec/std", "sp-api/std", "sp-core/std", "sp-runtime/std", diff --git a/pallets/schemas/src/runtime-api/Cargo.toml b/pallets/schemas/src/runtime-api/Cargo.toml index 23a17452de..2adc5f65f1 100644 --- a/pallets/schemas/src/runtime-api/Cargo.toml +++ 
b/pallets/schemas/src/runtime-api/Cargo.toml @@ -10,21 +10,17 @@ repository = "https://github.com/libertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ - "derive" -] } # Substrate -sp-runtime = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } +frame-support = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = "../../../../common/primitives" } [features] default = ["std"] std = [ - "codec/std", "sp-api/std", "frame-support/std", "common-primitives/std", diff --git a/pallets/schemas/src/tests/mock.rs b/pallets/schemas/src/tests/mock.rs index 91a1391d99..918da81343 100644 --- a/pallets/schemas/src/tests/mock.rs +++ b/pallets/schemas/src/tests/mock.rs @@ -1,9 +1,9 @@ -use codec::MaxEncodedLen; use frame_support::{ traits::{ConstU16, ConstU32, EitherOfDiverse}, weights::{Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial}, }; use frame_system::EnsureRoot; +use parity_scale_codec::MaxEncodedLen; use common_primitives::node::AccountId; use common_runtime::constants::DAYS; diff --git a/pallets/schemas/src/types.rs b/pallets/schemas/src/types.rs index c1573e43b7..a9df238733 100644 --- a/pallets/schemas/src/types.rs +++ b/pallets/schemas/src/types.rs @@ -1,7 +1,7 @@ //! 
Types for the Schema Pallet -use codec::{Decode, Encode, MaxEncodedLen}; use common_primitives::schema::{ModelType, PayloadLocation, SchemaSettings}; use frame_support::traits::StorageVersion; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_std::fmt::Debug; diff --git a/pallets/stateful-storage/Cargo.toml b/pallets/stateful-storage/Cargo.toml index af572a1b7f..9e9781b77a 100644 --- a/pallets/stateful-storage/Cargo.toml +++ b/pallets/stateful-storage/Cargo.toml @@ -13,29 +13,27 @@ version = "0.0.0" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive", ] } -log = { version = "0.4.17", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ - "derive", -] } -twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"] } +log = { workspace = true } +scale-info = { workspace = true, features = ["derive"] } +twox-hash = { workspace = true, features = ["digest_0_10"] } # Substrate -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, optional = true, branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +frame-benchmarking = { workspace = true, optional = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = "../../common/primitives" } [dev-dependencies] -env_logger = "0.10.0" -pretty_assertions = "1.3.0" -sp-keystore = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +env_logger = { workspace = true } +pretty_assertions = { workspace = true } +sp-keystore = { workspace = true } [features] default = ['std'] @@ -46,7 +44,7 @@ runtime-benchmarks = [ "common-primitives/runtime-benchmarks" ] std = [ - 'codec/std', + 'parity-scale-codec/std', 'scale-info/std', 'sp-std/std', 'sp-core/std', 'sp-io/std', diff --git a/pallets/stateful-storage/src/benchmarking.rs b/pallets/stateful-storage/src/benchmarking.rs index bd597e1340..6abd5ce9aa 100644 --- a/pallets/stateful-storage/src/benchmarking.rs +++ b/pallets/stateful-storage/src/benchmarking.rs @@ -1,7 +1,6 @@ #![allow(clippy::unwrap_used, clippy::expect_used)] use super::*; use crate::{types::ItemAction, Pallet as StatefulStoragePallet}; -use codec::{Decode, Encode}; use common_primitives::{ schema::{ModelType, PayloadLocation}, stateful_storage::{PageHash, PageId}, @@ -9,6 +8,7 
@@ use common_primitives::{ use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::assert_ok; use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; use sp_core::{bounded::BoundedVec, crypto::KeyTypeId}; use sp_runtime::RuntimeAppPublic; use stateful_child_tree::StatefulChildTree; diff --git a/pallets/stateful-storage/src/rpc/Cargo.toml b/pallets/stateful-storage/src/rpc/Cargo.toml index a58af0e5cf..ed29b393c7 100644 --- a/pallets/stateful-storage/src/rpc/Cargo.toml +++ b/pallets/stateful-storage/src/rpc/Cargo.toml @@ -10,31 +10,29 @@ repository = "https://github.com/libertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { workspace = true, features = ["client-core", "server", "macros"] } # Frequency crates pallet-stateful-storage-runtime-api = { path = "../runtime-api", default-features = false } common-primitives = { path = '../../../../common/primitives', default-features = false } common-helpers = { path = '../../../../common/helpers', default-features = false } # Substrate crates -frame-support = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-core = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +frame-support = { workspace = true } +sp-core = { workspace = true } +sp-api = { workspace = true } +sp-rpc = { workspace = true } +sp-runtime = { workspace = true } +sp-blockchain = { workspace = true } +sp-std = { workspace = true } [dev-dependencies] -tokio = { version = "1.25.0", features = ["macros", "time", "parking_lot"] } -substrate-test-runtime-client = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +tokio = { workspace = true, features = ["macros", "time", "parking_lot"] } +substrate-test-runtime-client = { workspace = true } +sc-client-api = { workspace = true } [features] default = ["std"] std = [ "sp-std/std", - "codec/std", "sp-api/std", "sp-core/std", "sp-runtime/std", diff --git a/pallets/stateful-storage/src/runtime-api/Cargo.toml b/pallets/stateful-storage/src/runtime-api/Cargo.toml index d3457217f9..a418225ac8 100644 --- a/pallets/stateful-storage/src/runtime-api/Cargo.toml +++ b/pallets/stateful-storage/src/runtime-api/Cargo.toml @@ -10,16 +10,13 @@ repository = "https://github.com/LibertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ - "derive" -] } # Substrate -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", 
default-features = false, branch = "release-polkadot-v1.1.0" }
-frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
-sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }
+sp-api = { workspace = true }
+frame-support = { workspace = true }
+sp-runtime = { workspace = true }
 # Frequency related dependencies
 common-primitives = { default-features = false, path = "../../../../common/primitives" }
 [features]
 default = ["std"]
-std = ["codec/std", "sp-api/std", "frame-support/std", 'common-primitives/std', 'sp-runtime/std']
+std = ["sp-api/std", "frame-support/std", 'common-primitives/std', 'sp-runtime/std']
diff --git a/pallets/stateful-storage/src/stateful_child_tree.rs b/pallets/stateful-storage/src/stateful_child_tree.rs
index 7af29bdf01..686bb46bf0 100644
--- a/pallets/stateful-storage/src/stateful_child_tree.rs
+++ b/pallets/stateful-storage/src/stateful_child_tree.rs
@@ -1,11 +1,11 @@
 use core::marker::PhantomData;
-use codec::{Decode, Encode};
 use common_primitives::msa::MessageSourceId;
 use frame_support::{
 	storage::{child, child::ChildInfo, ChildTriePrefixIterator},
 	Blake2_128, Blake2_256, Hashable, StorageHasher, Twox128, Twox256,
 };
+use parity_scale_codec::{Decode, Encode};
 use sp_core::{ConstU8, Get};
 use sp_io::hashing::twox_64;
 use sp_std::{fmt::Debug, prelude::*};
@@ -59,12 +59,15 @@ pub trait MultipartKey: MultipartStorageKeyPart {
 	fn hash(&self) -> Vec;
 	fn hash_prefix_only(&self) -> Vec;
-	fn decode(hash: &[u8]) -> Result {
+	fn decode(hash: &[u8]) -> Result {
 		let mut key_material = H::reverse(hash, Self::Arity::get());
 		::decode(&mut key_material)
 	}
-	fn decode_without_prefix(hash: &[u8], prefix_len: u8) -> Result {
+	fn decode_without_prefix(
+		hash: &[u8],
+		prefix_len: u8,
+	) -> Result {
 		if prefix_len > Self::Arity::get() {
 			return Err("Prefix longer than total key length".into())
 		}
@@ -85,7 +88,7 @@ impl MultipartKey for () {
 		Vec::new()
 	}
-	fn decode(_hash: &[u8]) -> Result {
+	fn decode(_hash: &[u8]) -> Result {
 		Ok(())
 	}
 }
diff --git a/pallets/stateful-storage/src/test_common.rs b/pallets/stateful-storage/src/test_common.rs
index 78be17bdff..e94167daf2 100644
--- a/pallets/stateful-storage/src/test_common.rs
+++ b/pallets/stateful-storage/src/test_common.rs
@@ -32,12 +32,12 @@ pub mod constants {
 #[cfg(test)]
 pub mod test_utility {
 	use crate::{pallet, tests::mock::Test, Config, ItemHeader, ItemizedPage, Page};
-	use codec::{Decode, Encode, MaxEncodedLen};
 	use common_primitives::{
 		schema::{ModelType, PayloadLocation},
 		stateful_storage::PageNonce,
 	};
 	use frame_support::BoundedVec;
+	use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
 	use scale_info::TypeInfo;
 	use sp_core::Get;
diff --git a/pallets/stateful-storage/src/tests/apply_item_actions_tests.rs b/pallets/stateful-storage/src/tests/apply_item_actions_tests.rs
index 240821d8bc..31f527f95a 100644
--- a/pallets/stateful-storage/src/tests/apply_item_actions_tests.rs
+++ b/pallets/stateful-storage/src/tests/apply_item_actions_tests.rs
@@ -5,12 +5,12 @@ use crate::{
 	types::*, Config, Error, Event as StatefulEvent,
 };
-use codec::Encode;
 use common_primitives::{
 	stateful_storage::{PageHash, PageNonce},
 	utils::wrap_binary_data,
 };
 use frame_support::{assert_err, assert_ok, BoundedVec};
+use parity_scale_codec::Encode;
 #[allow(unused_imports)]
 use pretty_assertions::{assert_eq, assert_ne, assert_str_eq};
 use sp_core::{sr25519, Get, Pair};
diff --git
a/pallets/stateful-storage/src/tests/delete_page_tests.rs b/pallets/stateful-storage/src/tests/delete_page_tests.rs index 97e1d44531..450a8f3ba7 100644 --- a/pallets/stateful-storage/src/tests/delete_page_tests.rs +++ b/pallets/stateful-storage/src/tests/delete_page_tests.rs @@ -5,9 +5,9 @@ use crate::{ types::*, Config, Error, Event as StatefulEvent, }; -use codec::Encode; use common_primitives::{stateful_storage::PageHash, utils::wrap_binary_data}; use frame_support::{assert_err, assert_ok, assert_storage_noop}; +use parity_scale_codec::Encode; #[allow(unused_imports)] use pretty_assertions::{assert_eq, assert_ne, assert_str_eq}; use sp_core::{sr25519, Get, Pair}; diff --git a/pallets/stateful-storage/src/tests/itemized_operations_tests.rs b/pallets/stateful-storage/src/tests/itemized_operations_tests.rs index eecc1fca81..37b4db4336 100644 --- a/pallets/stateful-storage/src/tests/itemized_operations_tests.rs +++ b/pallets/stateful-storage/src/tests/itemized_operations_tests.rs @@ -1,7 +1,7 @@ use crate::{test_common::test_utility::*, tests::mock::*, types::*, Config}; -use codec::{Encode, MaxEncodedLen}; use common_primitives::stateful_storage::PageNonce; use frame_support::{assert_ok, traits::Len}; +use parity_scale_codec::{Encode, MaxEncodedLen}; #[allow(unused_imports)] use pretty_assertions::{assert_eq, assert_ne, assert_str_eq}; diff --git a/pallets/stateful-storage/src/tests/mock.rs b/pallets/stateful-storage/src/tests/mock.rs index aff6be3370..d40f83a883 100644 --- a/pallets/stateful-storage/src/tests/mock.rs +++ b/pallets/stateful-storage/src/tests/mock.rs @@ -1,5 +1,5 @@ use crate as pallet_stateful_storage; -use codec::Decode; +use parity_scale_codec::Decode; use crate::test_common::{ constants, diff --git a/pallets/stateful-storage/src/tests/other_tests.rs b/pallets/stateful-storage/src/tests/other_tests.rs index efe03abc3e..35302ba13f 100644 --- a/pallets/stateful-storage/src/tests/other_tests.rs +++ b/pallets/stateful-storage/src/tests/other_tests.rs @@ -5,9 +5,9 @@ use crate::{ types::*, Config, Error, }; -use codec::Encode; use common_primitives::utils::wrap_binary_data; use frame_support::{assert_err, assert_ok}; +use parity_scale_codec::Encode; #[allow(unused_imports)] use pretty_assertions::{assert_eq, assert_ne, assert_str_eq}; use sp_core::Pair; diff --git a/pallets/stateful-storage/src/tests/upsert_page_tests.rs b/pallets/stateful-storage/src/tests/upsert_page_tests.rs index 3701c0cf99..f7f8366f8d 100644 --- a/pallets/stateful-storage/src/tests/upsert_page_tests.rs +++ b/pallets/stateful-storage/src/tests/upsert_page_tests.rs @@ -5,13 +5,13 @@ use crate::{ types::*, Config, Error, Event as StatefulEvent, }; -use codec::Encode; use common_primitives::{ schema::SchemaId, stateful_storage::{PageHash, PageId}, utils::wrap_binary_data, }; use frame_support::{assert_err, assert_ok}; +use parity_scale_codec::Encode; #[allow(unused_imports)] use pretty_assertions::{assert_eq, assert_ne, assert_str_eq}; use sp_core::{sr25519, Get, Pair}; diff --git a/pallets/stateful-storage/src/types.rs b/pallets/stateful-storage/src/types.rs index 1c69e34618..1570202cfa 100644 --- a/pallets/stateful-storage/src/types.rs +++ b/pallets/stateful-storage/src/types.rs @@ -1,6 +1,5 @@ //! 
Types for the Stateful Storage Pallet use crate::Config; -use codec::{Decode, Encode, MaxEncodedLen}; use common_primitives::{ msa::MessageSourceId, schema::SchemaId, @@ -8,6 +7,7 @@ use common_primitives::{ }; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; +use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_core::bounded::BoundedVec; use sp_std::{ diff --git a/pallets/time-release/Cargo.toml b/pallets/time-release/Cargo.toml index c961aa97b0..df6a05c545 100644 --- a/pallets/time-release/Cargo.toml +++ b/pallets/time-release/Cargo.toml @@ -10,30 +10,30 @@ publish = false version = "0.0.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "max-encoded-len", ] } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { workspace = true, features = [ "derive", ] } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-benchmarking = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", optional = true, branch = "release-polkadot-v1.1.0" } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } +frame-benchmarking = { workspace = true, optional = true } [dev-dependencies] -chrono = { version = "0.4.24" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } +chrono = { workspace = true } +pallet-balances = { workspace = true } +sp-core = { workspace = true } common-primitives = { default-features = false, path = "../../common/primitives" } [features] default = ["std"] std = [ - "codec/std", + "parity-scale-codec/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/pallets/time-release/src/types.rs b/pallets/time-release/src/types.rs index 98cf83dbbf..94b5640551 100644 --- a/pallets/time-release/src/types.rs +++ b/pallets/time-release/src/types.rs @@ -1,7 +1,7 @@ //! 
Types for the TimeRelease Pallet #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; +use parity_scale_codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use sp_runtime::{traits::AtLeast32Bit, RuntimeDebug}; use sp_std::cmp::{Eq, PartialEq}; diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index 37dba2229c..e324676f9f 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -13,35 +13,35 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Frequency -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { workspace = true, features = [ "derive", ] } common-primitives = { default-features = false, path = "../../common/primitives" } -scale-info = { version = "2.10.0", default-features = false, features = [ +scale-info = { workspace = true, features = [ "derive", ] } -smallvec = "1.11.0" +smallvec = { workspace = true } # Substrate -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } +sp-weights = { workspace = true } # Substrate pallets -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-democracy = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-session = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-treasury = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +pallet-balances = { workspace = true } +pallet-collective = { workspace = true } +pallet-democracy = { workspace = true } +pallet-multisig = { workspace = true } +pallet-preimage = { workspace = true } 
+pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } # cumulus -cumulus-primitives-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-collator-selection = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +cumulus-primitives-core = { workspace = true } +pallet-collator-selection = { workspace = true } pallet-time-release = { path = "../../pallets/time-release", default-features = false } [features] @@ -70,7 +70,7 @@ std = [ "pallet-scheduler/std", "sp-core/std", "common-primitives/std", - "codec/std", + "parity-scale-codec/std", "scale-info/std", "sp-std/std", "pallet-preimage/std", diff --git a/runtime/common/src/constants.rs b/runtime/common/src/constants.rs index 24cec370f4..a4439c1743 100644 --- a/runtime/common/src/constants.rs +++ b/runtime/common/src/constants.rs @@ -1,6 +1,6 @@ use crate::prod_or_testnet_or_local; -use codec::{Encode, MaxEncodedLen}; use common_primitives::node::BlockNumber; +use parity_scale_codec::{Encode, MaxEncodedLen}; use frame_support::{ parameter_types, diff --git a/runtime/common/src/extensions/check_nonce.rs b/runtime/common/src/extensions/check_nonce.rs index 8e9aa1dea4..627215245c 100644 --- a/runtime/common/src/extensions/check_nonce.rs +++ b/runtime/common/src/extensions/check_nonce.rs @@ -16,8 +16,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Decode, Encode}; use frame_system::Config; +use parity_scale_codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, Pays}, diff --git a/runtime/frequency/Cargo.toml b/runtime/frequency/Cargo.toml index 5c0b16c822..77a959d234 100644 --- a/runtime/frequency/Cargo.toml +++ b/runtime/frequency/Cargo.toml @@ -15,51 +15,55 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = {git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } [dependencies] -codec = {package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +parity-scale-codec = {workspace = true, features = [ "derive", ]} -hex-literal = {version = "0.4.1", optional = true} -log = {version = "0.4.17", default-features = false} -scale-info = {version = "2.10.0", default-features = false, features = [ +hex-literal = { workspace = true, optional = true} +log = { workspace = true } +scale-info = { workspace = true, features = [ "derive", ]} -serde = {version = "1.0", optional = true, features = ["derive"]} -smallvec = "1.11.0" + +serde = { workspace = true, optional = true, features = ["derive"] } +smallvec = { workspace = true } # Substrate -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, optional = true, branch = "release-polkadot-v1.1.0" } -frame-executive = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", 
default-features = false, optional = true, branch = "release-polkadot-v1.1.0" } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, optional = true, branch = "release-polkadot-v1.1.0" } -pallet-aura = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-authorship = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-preimage = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-scheduler = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-democracy = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-collective = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-session = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-sudo = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-multisig = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-treasury = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-session = { git = 
"https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-version = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +frame-benchmarking = { workspace = true, optional = true } +frame-executive = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { workspace = true, optional = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { workspace = true, optional = true } + +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-balances = { workspace = true } +pallet-preimage = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-democracy = { workspace = true } +pallet-collective = { workspace = true } +pallet-session = { workspace = true } +pallet-sudo = { workspace = true } +pallet-multisig = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } + +sp-api = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } + +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-std = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } # Ported Pallet from ORML pallet-time-release = { path = "../../pallets/time-release", default-features = false } # Frequency @@ -80,22 +84,22 @@ pallet-handles = { path = "../../pallets/handles", default-features = false } pallet-handles-runtime-api = { path = "../../pallets/handles/src/runtime-api", default-features = false } system-runtime-api = { path = '../system-runtime-api', default-features = false } # Polkadot -polkadot-parachain-primitives = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -polkadot-runtime-common = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } # Cumulus -cumulus-pallet-aura-ext = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -cumulus-pallet-parachain-system = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -cumulus-pallet-session-benchmarking = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -cumulus-primitives-core = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -cumulus-primitives-timestamp = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -cumulus-primitives-aura = {git = 
"https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -pallet-collator-selection = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -parachain-info = {git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-timestamp = { workspace = true } +cumulus-primitives-aura = { workspace = true } +pallet-collator-selection = { workspace = true } +parachain-info = { workspace = true } [features] default = ["std"] std = [ - "codec/std", + "parity-scale-codec/std", "log/std", "scale-info/std", "serde", diff --git a/runtime/frequency/src/lib.rs b/runtime/frequency/src/lib.rs index 4902df6526..07a70720b9 100644 --- a/runtime/frequency/src/lib.rs +++ b/runtime/frequency/src/lib.rs @@ -17,7 +17,7 @@ use sp_runtime::{ ApplyExtrinsicResult, DispatchError, }; -use codec::Encode; +use parity_scale_codec::Encode; use sp_std::prelude::*; #[cfg(feature = "std")] @@ -261,7 +261,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 62, + spec_version: 63, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -275,7 +275,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency-rococo"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 62, + spec_version: 63, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/runtime/system-runtime-api/Cargo.toml b/runtime/system-runtime-api/Cargo.toml index a83cb525bc..f64a1a48aa 100644 --- a/runtime/system-runtime-api/Cargo.toml +++ b/runtime/system-runtime-api/Cargo.toml @@ -10,25 +10,21 @@ repository = "https://github.com/libertyDSNP/frequency/" edition = "2021" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ - "derive" -] } -serde_json = { version = "1.0.86", default-features = false, features = [ +serde_json = { workspace = true, features = [ "alloc", ] } # Substrate -sp-runtime = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -sp-std = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" } +sp-runtime = { workspace = true } +sp-api = { workspace = true } +sp-std = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } # Frequency related dependencies common-primitives = { default-features = false, path = "../../common/primitives" } [features] default = ["std"] std = [ - "codec/std", "sp-api/std", "frame-support/std", "common-primitives/std", From 9691055c47620bc28e2a59eabb81595e33ea29b3 Mon Sep 
17 00:00:00 2001
From: Aramik
Date: Thu, 16 Nov 2023 15:10:02 -0800
Subject: [PATCH 8/9] Stateful storage PoV optimization (#1757)

# Goal
The goal of this PR is to evaluate and minimize PoV consumption by the `stateful-storage` pallet.

Closes #1782

# Discussion
- Refactored benchmarks to calculate the maximum of execution time and PoV
- Decreased the `additional-trie-layers` value
- Decreased `MaxItemizedPageSizeBytes` from 64KiB to around 10KiB (enough for around 292 itemized public keys of 32 bytes each)

# Improvements
- With these changes, the PoV for `apply_item_actions` was reduced from **45KB** to **15KB**
- With these changes, the PoV for `upsert` and `delete` pages was reduced from **12KB** to **6KB**

# Future improvements
- Using PoV clawback, we can further reduce PoV sizes; tracking [PR](https://github.com/paritytech/polkadot-sdk/pull/1462)

# Checklist
- [x] Benchmarks added
- [x] Weights updated

---------

Co-authored-by: Frequency CI [bot]
Co-authored-by: Wil Wade
---
 pallets/capacity/src/weights.rs | 62 ++---
 pallets/frequency-tx-payment/src/weights.rs | 30 +--
 pallets/handles/src/weights.rs | 58 ++---
 pallets/messages/src/weights.rs | 38 +--
 pallets/msa/src/weights.rs | 182 +++++++-------
 pallets/schemas/src/weights.rs | 94 ++++----
 pallets/stateful-storage/src/benchmarking.rs | 137 +++++++++--
 pallets/stateful-storage/src/lib.rs | 27 ++-
 pallets/stateful-storage/src/weights.rs | 228 ++++++++++++------
 pallets/time-release/src/weights.rs | 58 ++---
 runtime/common/src/constants.rs | 9 +-
 runtime/common/src/weights/block_weights.rs | 20 +-
 .../common/src/weights/extrinsic_weights.rs | 20 +-
 runtime/common/src/weights/pallet_balances.rs | 58 ++---
 .../src/weights/pallet_collator_selection.rs | 84 +++----
 .../src/weights/pallet_collective_council.rs | 152 ++++++------
 .../pallet_collective_technical_committee.rs | 152 ++++++------
 .../common/src/weights/pallet_democracy.rs | 200 +++++++--------
 runtime/common/src/weights/pallet_multisig.rs | 86 +++----
 runtime/common/src/weights/pallet_preimage.rs | 90 +++----
 .../common/src/weights/pallet_scheduler.rs | 92 +++----
 runtime/common/src/weights/pallet_session.rs | 18 +-
 .../common/src/weights/pallet_timestamp.rs | 16 +-
 runtime/common/src/weights/pallet_treasury.rs | 48 ++--
 runtime/common/src/weights/pallet_utility.rs | 38 +--
 runtime/frequency/src/lib.rs | 4 +-
 scripts/run_benchmarks.sh | 2 +-
 27 files changed, 1084 insertions(+), 919 deletions(-)

diff --git a/pallets/capacity/src/weights.rs b/pallets/capacity/src/weights.rs
index 4a665d8a64..fb7f024db5 100644
--- a/pallets/capacity/src/weights.rs
+++ b/pallets/capacity/src/weights.rs
@@ -18,9 +18,9 @@
 //! Autogenerated weights for pallet_capacity
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz`
+//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz`
 //! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/capacity/src/weights.rs @@ -74,9 +74,9 @@ impl WeightInfo for SubstrateWeight { fn stake() -> Weight { // Proof Size summary in bytes: // Measured: `223` - // Estimated: `13674` - // Minimum execution time: 43_127_000 picoseconds. - Weight::from_parts(44_191_000, 13674) + // Estimated: `6249` + // Minimum execution time: 43_154_000 picoseconds. + Weight::from_parts(44_525_000, 6249) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -89,9 +89,9 @@ impl WeightInfo for SubstrateWeight { fn withdraw_unstaked() -> Weight { // Proof Size summary in bytes: // Measured: `339` - // Estimated: `13674` - // Minimum execution time: 33_146_000 picoseconds. - Weight::from_parts(34_058_000, 13674) + // Estimated: `6249` + // Minimum execution time: 33_031_000 picoseconds. + Weight::from_parts(34_284_000, 6249) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -102,9 +102,9 @@ impl WeightInfo for SubstrateWeight { fn on_initialize() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `10399` - // Minimum execution time: 4_040_000 picoseconds. - Weight::from_parts(4_215_000, 10399) + // Estimated: `2974` + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_036_000, 2974) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -121,9 +121,9 @@ impl WeightInfo for SubstrateWeight { fn unstake() -> Weight { // Proof Size summary in bytes: // Measured: `433` - // Estimated: `13674` - // Minimum execution time: 38_156_000 picoseconds. - Weight::from_parts(39_905_000, 13674) + // Estimated: `6249` + // Minimum execution time: 38_465_000 picoseconds. + Weight::from_parts(39_656_000, 6249) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -133,8 +133,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_429_000 picoseconds. - Weight::from_parts(6_896_000, 0) + // Minimum execution time: 6_805_000 picoseconds. + Weight::from_parts(7_569_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -156,9 +156,9 @@ impl WeightInfo for () { fn stake() -> Weight { // Proof Size summary in bytes: // Measured: `223` - // Estimated: `13674` - // Minimum execution time: 43_127_000 picoseconds. - Weight::from_parts(44_191_000, 13674) + // Estimated: `6249` + // Minimum execution time: 43_154_000 picoseconds. + Weight::from_parts(44_525_000, 6249) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -171,9 +171,9 @@ impl WeightInfo for () { fn withdraw_unstaked() -> Weight { // Proof Size summary in bytes: // Measured: `339` - // Estimated: `13674` - // Minimum execution time: 33_146_000 picoseconds. - Weight::from_parts(34_058_000, 13674) + // Estimated: `6249` + // Minimum execution time: 33_031_000 picoseconds. 
+ Weight::from_parts(34_284_000, 6249) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -184,9 +184,9 @@ impl WeightInfo for () { fn on_initialize() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `10399` - // Minimum execution time: 4_040_000 picoseconds. - Weight::from_parts(4_215_000, 10399) + // Estimated: `2974` + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_036_000, 2974) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -203,9 +203,9 @@ impl WeightInfo for () { fn unstake() -> Weight { // Proof Size summary in bytes: // Measured: `433` - // Estimated: `13674` - // Minimum execution time: 38_156_000 picoseconds. - Weight::from_parts(39_905_000, 13674) + // Estimated: `6249` + // Minimum execution time: 38_465_000 picoseconds. + Weight::from_parts(39_656_000, 6249) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -215,8 +215,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_429_000 picoseconds. - Weight::from_parts(6_896_000, 0) + // Minimum execution time: 6_805_000 picoseconds. + Weight::from_parts(7_569_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/pallets/frequency-tx-payment/src/weights.rs b/pallets/frequency-tx-payment/src/weights.rs index 85b3f62966..e8bf3c984f 100644 --- a/pallets/frequency-tx-payment/src/weights.rs +++ b/pallets/frequency-tx-payment/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_frequency_tx_payment //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/frequency-tx-payment/src/weights.rs @@ -60,18 +60,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_362_000 picoseconds. - Weight::from_parts(3_551_000, 0) + // Minimum execution time: 3_294_000 picoseconds. + Weight::from_parts(3_499_000, 0) } /// The range of component `n` is `[0, 10]`. fn pay_with_capacity_batch_all(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_061_000 picoseconds. - Weight::from_parts(8_127_386, 0) - // Standard Error: 16_699 - .saturating_add(Weight::from_parts(4_653_714, 0).saturating_mul(n.into())) + // Minimum execution time: 7_318_000 picoseconds. 
+ Weight::from_parts(8_276_383, 0) + // Standard Error: 12_166 + .saturating_add(Weight::from_parts(4_732_333, 0).saturating_mul(n.into())) } } @@ -81,17 +81,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_362_000 picoseconds. - Weight::from_parts(3_551_000, 0) + // Minimum execution time: 3_294_000 picoseconds. + Weight::from_parts(3_499_000, 0) } /// The range of component `n` is `[0, 10]`. fn pay_with_capacity_batch_all(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_061_000 picoseconds. - Weight::from_parts(8_127_386, 0) - // Standard Error: 16_699 - .saturating_add(Weight::from_parts(4_653_714, 0).saturating_mul(n.into())) + // Minimum execution time: 7_318_000 picoseconds. + Weight::from_parts(8_276_383, 0) + // Standard Error: 12_166 + .saturating_add(Weight::from_parts(4_732_333, 0).saturating_mul(n.into())) } } diff --git a/pallets/handles/src/weights.rs b/pallets/handles/src/weights.rs index 659567b895..97d3480f54 100644 --- a/pallets/handles/src/weights.rs +++ b/pallets/handles/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_handles //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/handles/src/weights.rs @@ -69,11 +69,11 @@ impl WeightInfo for SubstrateWeight { fn claim_handle(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `191` - // Estimated: `12434` - // Minimum execution time: 81_465_000 picoseconds. - Weight::from_parts(83_332_398, 12434) - // Standard Error: 9_174 - .saturating_add(Weight::from_parts(77_143, 0).saturating_mul(b.into())) + // Estimated: `5009` + // Minimum execution time: 82_418_000 picoseconds. + Weight::from_parts(84_250_657, 5009) + // Standard Error: 18_982 + .saturating_add(Weight::from_parts(75_863, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -89,11 +89,11 @@ impl WeightInfo for SubstrateWeight { fn change_handle(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `297 + b * (1 ±0)` - // Estimated: `12434` - // Minimum execution time: 91_552_000 picoseconds. - Weight::from_parts(93_641_136, 12434) - // Standard Error: 10_045 - .saturating_add(Weight::from_parts(152_851, 0).saturating_mul(b.into())) + // Estimated: `5009` + // Minimum execution time: 93_554_000 picoseconds. 
+ Weight::from_parts(94_507_705, 5009) + // Standard Error: 8_136 + .saturating_add(Weight::from_parts(165_626, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -106,9 +106,9 @@ impl WeightInfo for SubstrateWeight { fn retire_handle() -> Weight { // Proof Size summary in bytes: // Measured: `306` - // Estimated: `12434` - // Minimum execution time: 22_019_000 picoseconds. - Weight::from_parts(22_866_000, 12434) + // Estimated: `5009` + // Minimum execution time: 22_739_000 picoseconds. + Weight::from_parts(23_360_000, 5009) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -128,11 +128,11 @@ impl WeightInfo for () { fn claim_handle(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `191` - // Estimated: `12434` - // Minimum execution time: 81_465_000 picoseconds. - Weight::from_parts(83_332_398, 12434) - // Standard Error: 9_174 - .saturating_add(Weight::from_parts(77_143, 0).saturating_mul(b.into())) + // Estimated: `5009` + // Minimum execution time: 82_418_000 picoseconds. + Weight::from_parts(84_250_657, 5009) + // Standard Error: 18_982 + .saturating_add(Weight::from_parts(75_863, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -148,11 +148,11 @@ impl WeightInfo for () { fn change_handle(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `297 + b * (1 ±0)` - // Estimated: `12434` - // Minimum execution time: 91_552_000 picoseconds. - Weight::from_parts(93_641_136, 12434) - // Standard Error: 10_045 - .saturating_add(Weight::from_parts(152_851, 0).saturating_mul(b.into())) + // Estimated: `5009` + // Minimum execution time: 93_554_000 picoseconds. + Weight::from_parts(94_507_705, 5009) + // Standard Error: 8_136 + .saturating_add(Weight::from_parts(165_626, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -165,9 +165,9 @@ impl WeightInfo for () { fn retire_handle() -> Weight { // Proof Size summary in bytes: // Measured: `306` - // Estimated: `12434` - // Minimum execution time: 22_019_000 picoseconds. - Weight::from_parts(22_866_000, 12434) + // Estimated: `5009` + // Minimum execution time: 22_739_000 picoseconds. + Weight::from_parts(23_360_000, 5009) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/pallets/messages/src/weights.rs b/pallets/messages/src/weights.rs index b20bf16e69..476963d5bd 100644 --- a/pallets/messages/src/weights.rs +++ b/pallets/messages/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-d4nrm`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/messages/src/weights.rs @@ -68,11 +68,11 @@ impl WeightInfo for SubstrateWeight { fn add_onchain_message(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `402` - // Estimated: `12592` - // Minimum execution time: 32_164_000 picoseconds. - Weight::from_parts(33_345_645, 12592) - // Standard Error: 43 - .saturating_add(Weight::from_parts(848, 0).saturating_mul(n.into())) + // Estimated: `5167` + // Minimum execution time: 32_088_000 picoseconds. + Weight::from_parts(33_157_225, 5167) + // Standard Error: 109 + .saturating_add(Weight::from_parts(807, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -85,9 +85,9 @@ impl WeightInfo for SubstrateWeight { fn add_ipfs_message() -> Weight { // Proof Size summary in bytes: // Measured: `790` - // Estimated: `12423` - // Minimum execution time: 31_839_000 picoseconds. - Weight::from_parts(32_576_000, 12423) + // Estimated: `4998` + // Minimum execution time: 32_045_000 picoseconds. + Weight::from_parts(32_582_000, 4998) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,11 +107,11 @@ impl WeightInfo for () { fn add_onchain_message(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `402` - // Estimated: `12592` - // Minimum execution time: 32_164_000 picoseconds. - Weight::from_parts(33_345_645, 12592) - // Standard Error: 43 - .saturating_add(Weight::from_parts(848, 0).saturating_mul(n.into())) + // Estimated: `5167` + // Minimum execution time: 32_088_000 picoseconds. + Weight::from_parts(33_157_225, 5167) + // Standard Error: 109 + .saturating_add(Weight::from_parts(807, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -124,9 +124,9 @@ impl WeightInfo for () { fn add_ipfs_message() -> Weight { // Proof Size summary in bytes: // Measured: `790` - // Estimated: `12423` - // Minimum execution time: 31_839_000 picoseconds. - Weight::from_parts(32_576_000, 12423) + // Estimated: `4998` + // Minimum execution time: 32_045_000 picoseconds. + Weight::from_parts(32_582_000, 4998) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/msa/src/weights.rs b/pallets/msa/src/weights.rs index 015fa361b8..0ff92ee9e7 100644 --- a/pallets/msa/src/weights.rs +++ b/pallets/msa/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_msa //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/msa/src/weights.rs @@ -75,9 +75,9 @@ impl WeightInfo for SubstrateWeight { fn create() -> Weight { // Proof Size summary in bytes: // Measured: `4` - // Estimated: `12423` - // Minimum execution time: 12_647_000 picoseconds. - Weight::from_parts(13_214_000, 12423) + // Estimated: `4998` + // Minimum execution time: 12_927_000 picoseconds. + Weight::from_parts(13_413_000, 4998) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -100,12 +100,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 30]`. fn create_sponsored_account_with_delegation(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1393` - // Estimated: `14946` - // Minimum execution time: 115_007_000 picoseconds. - Weight::from_parts(118_226_184, 14946) - // Standard Error: 116_172 - .saturating_add(Weight::from_parts(304_680, 0).saturating_mul(s.into())) + // Measured: `1394` + // Estimated: `7521` + // Minimum execution time: 116_866_000 picoseconds. + Weight::from_parts(119_640_588, 7521) + // Standard Error: 13_082 + .saturating_add(Weight::from_parts(70_133, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -116,9 +116,9 @@ impl WeightInfo for SubstrateWeight { fn revoke_delegation_by_provider() -> Weight { // Proof Size summary in bytes: // Measured: `236` - // Estimated: `12592` - // Minimum execution time: 17_201_000 picoseconds. - Weight::from_parts(17_362_000, 12592) + // Estimated: `5167` + // Minimum execution time: 17_346_000 picoseconds. + Weight::from_parts(17_882_000, 5167) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -132,10 +132,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Msa::PublicKeyCountForMsaId` (`max_values`: None, `max_size`: Some(17), added: 2492, mode: `MaxEncodedLen`) fn add_public_key_to_msa() -> Weight { // Proof Size summary in bytes: - // Measured: `1649` - // Estimated: `18396` - // Minimum execution time: 168_854_000 picoseconds. - Weight::from_parts(172_349_000, 18396) + // Measured: `1616` + // Estimated: `10971` + // Minimum execution time: 169_365_000 picoseconds. + Weight::from_parts(172_841_000, 10971) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -146,9 +146,9 @@ impl WeightInfo for SubstrateWeight { fn delete_msa_public_key() -> Weight { // Proof Size summary in bytes: // Measured: `329` - // Estimated: `14946` - // Minimum execution time: 23_981_000 picoseconds. - Weight::from_parts(24_617_000, 14946) + // Estimated: `7521` + // Minimum execution time: 24_516_000 picoseconds. + Weight::from_parts(25_841_000, 7521) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -159,9 +159,9 @@ impl WeightInfo for SubstrateWeight { fn retire_msa() -> Weight { // Proof Size summary in bytes: // Measured: `146` - // Estimated: `12423` - // Minimum execution time: 19_302_000 picoseconds. - Weight::from_parts(19_765_000, 12423) + // Estimated: `4998` + // Minimum execution time: 19_457_000 picoseconds. 
+ Weight::from_parts(20_553_000, 4998) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -181,11 +181,11 @@ impl WeightInfo for SubstrateWeight { fn grant_delegation(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1443` - // Estimated: `14946` - // Minimum execution time: 110_255_000 picoseconds. - Weight::from_parts(113_007_446, 14946) - // Standard Error: 13_684 - .saturating_add(Weight::from_parts(111_907, 0).saturating_mul(s.into())) + // Estimated: `7521` + // Minimum execution time: 110_342_000 picoseconds. + Weight::from_parts(113_376_837, 7521) + // Standard Error: 18_613 + .saturating_add(Weight::from_parts(168_163, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -196,9 +196,9 @@ impl WeightInfo for SubstrateWeight { fn revoke_delegation_by_delegator() -> Weight { // Proof Size summary in bytes: // Measured: `236` - // Estimated: `12592` - // Minimum execution time: 17_145_000 picoseconds. - Weight::from_parts(17_802_000, 12592) + // Estimated: `5167` + // Minimum execution time: 17_265_000 picoseconds. + Weight::from_parts(18_123_000, 5167) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -209,9 +209,9 @@ impl WeightInfo for SubstrateWeight { fn create_provider() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `12423` - // Minimum execution time: 13_984_000 picoseconds. - Weight::from_parts(14_167_000, 12423) + // Estimated: `4998` + // Minimum execution time: 14_215_000 picoseconds. + Weight::from_parts(14_644_000, 4998) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -222,9 +222,9 @@ impl WeightInfo for SubstrateWeight { fn create_provider_via_governance() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `12423` - // Minimum execution time: 13_696_000 picoseconds. - Weight::from_parts(14_311_000, 12423) + // Estimated: `4998` + // Minimum execution time: 13_930_000 picoseconds. + Weight::from_parts(14_489_000, 4998) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -241,9 +241,9 @@ impl WeightInfo for SubstrateWeight { fn propose_to_be_provider() -> Weight { // Proof Size summary in bytes: // Measured: `222` - // Estimated: `12597` - // Minimum execution time: 23_742_000 picoseconds. - Weight::from_parts(23_995_000, 12597) + // Estimated: `5172` + // Minimum execution time: 24_218_000 picoseconds. + Weight::from_parts(24_976_000, 5172) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -257,11 +257,11 @@ impl WeightInfo for SubstrateWeight { fn revoke_schema_permissions(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `391 + s * (6 ±0)` - // Estimated: `12592` - // Minimum execution time: 20_546_000 picoseconds. - Weight::from_parts(21_724_420, 12592) - // Standard Error: 4_103 - .saturating_add(Weight::from_parts(109_234, 0).saturating_mul(s.into())) + // Estimated: `5167` + // Minimum execution time: 20_709_000 picoseconds. 
+ Weight::from_parts(22_382_082, 5167) + // Standard Error: 12_844 + .saturating_add(Weight::from_parts(87_187, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -278,9 +278,9 @@ impl WeightInfo for () { fn create() -> Weight { // Proof Size summary in bytes: // Measured: `4` - // Estimated: `12423` - // Minimum execution time: 12_647_000 picoseconds. - Weight::from_parts(13_214_000, 12423) + // Estimated: `4998` + // Minimum execution time: 12_927_000 picoseconds. + Weight::from_parts(13_413_000, 4998) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -303,12 +303,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 30]`. fn create_sponsored_account_with_delegation(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1393` - // Estimated: `14946` - // Minimum execution time: 115_007_000 picoseconds. - Weight::from_parts(118_226_184, 14946) - // Standard Error: 116_172 - .saturating_add(Weight::from_parts(304_680, 0).saturating_mul(s.into())) + // Measured: `1394` + // Estimated: `7521` + // Minimum execution time: 116_866_000 picoseconds. + Weight::from_parts(119_640_588, 7521) + // Standard Error: 13_082 + .saturating_add(Weight::from_parts(70_133, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -319,9 +319,9 @@ impl WeightInfo for () { fn revoke_delegation_by_provider() -> Weight { // Proof Size summary in bytes: // Measured: `236` - // Estimated: `12592` - // Minimum execution time: 17_201_000 picoseconds. - Weight::from_parts(17_362_000, 12592) + // Estimated: `5167` + // Minimum execution time: 17_346_000 picoseconds. + Weight::from_parts(17_882_000, 5167) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -335,10 +335,10 @@ impl WeightInfo for () { /// Proof: `Msa::PublicKeyCountForMsaId` (`max_values`: None, `max_size`: Some(17), added: 2492, mode: `MaxEncodedLen`) fn add_public_key_to_msa() -> Weight { // Proof Size summary in bytes: - // Measured: `1649` - // Estimated: `18396` - // Minimum execution time: 168_854_000 picoseconds. - Weight::from_parts(172_349_000, 18396) + // Measured: `1616` + // Estimated: `10971` + // Minimum execution time: 169_365_000 picoseconds. + Weight::from_parts(172_841_000, 10971) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -349,9 +349,9 @@ impl WeightInfo for () { fn delete_msa_public_key() -> Weight { // Proof Size summary in bytes: // Measured: `329` - // Estimated: `14946` - // Minimum execution time: 23_981_000 picoseconds. - Weight::from_parts(24_617_000, 14946) + // Estimated: `7521` + // Minimum execution time: 24_516_000 picoseconds. + Weight::from_parts(25_841_000, 7521) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -362,9 +362,9 @@ impl WeightInfo for () { fn retire_msa() -> Weight { // Proof Size summary in bytes: // Measured: `146` - // Estimated: `12423` - // Minimum execution time: 19_302_000 picoseconds. - Weight::from_parts(19_765_000, 12423) + // Estimated: `4998` + // Minimum execution time: 19_457_000 picoseconds. 
+ Weight::from_parts(20_553_000, 4998) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -384,11 +384,11 @@ impl WeightInfo for () { fn grant_delegation(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1443` - // Estimated: `14946` - // Minimum execution time: 110_255_000 picoseconds. - Weight::from_parts(113_007_446, 14946) - // Standard Error: 13_684 - .saturating_add(Weight::from_parts(111_907, 0).saturating_mul(s.into())) + // Estimated: `7521` + // Minimum execution time: 110_342_000 picoseconds. + Weight::from_parts(113_376_837, 7521) + // Standard Error: 18_613 + .saturating_add(Weight::from_parts(168_163, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -399,9 +399,9 @@ impl WeightInfo for () { fn revoke_delegation_by_delegator() -> Weight { // Proof Size summary in bytes: // Measured: `236` - // Estimated: `12592` - // Minimum execution time: 17_145_000 picoseconds. - Weight::from_parts(17_802_000, 12592) + // Estimated: `5167` + // Minimum execution time: 17_265_000 picoseconds. + Weight::from_parts(18_123_000, 5167) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -412,9 +412,9 @@ impl WeightInfo for () { fn create_provider() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `12423` - // Minimum execution time: 13_984_000 picoseconds. - Weight::from_parts(14_167_000, 12423) + // Estimated: `4998` + // Minimum execution time: 14_215_000 picoseconds. + Weight::from_parts(14_644_000, 4998) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -425,9 +425,9 @@ impl WeightInfo for () { fn create_provider_via_governance() -> Weight { // Proof Size summary in bytes: // Measured: `142` - // Estimated: `12423` - // Minimum execution time: 13_696_000 picoseconds. - Weight::from_parts(14_311_000, 12423) + // Estimated: `4998` + // Minimum execution time: 13_930_000 picoseconds. + Weight::from_parts(14_489_000, 4998) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -444,9 +444,9 @@ impl WeightInfo for () { fn propose_to_be_provider() -> Weight { // Proof Size summary in bytes: // Measured: `222` - // Estimated: `12597` - // Minimum execution time: 23_742_000 picoseconds. - Weight::from_parts(23_995_000, 12597) + // Estimated: `5172` + // Minimum execution time: 24_218_000 picoseconds. + Weight::from_parts(24_976_000, 5172) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -460,11 +460,11 @@ impl WeightInfo for () { fn revoke_schema_permissions(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `391 + s * (6 ±0)` - // Estimated: `12592` - // Minimum execution time: 20_546_000 picoseconds. - Weight::from_parts(21_724_420, 12592) - // Standard Error: 4_103 - .saturating_add(Weight::from_parts(109_234, 0).saturating_mul(s.into())) + // Estimated: `5167` + // Minimum execution time: 20_709_000 picoseconds. 
+ Weight::from_parts(22_382_082, 5167) + // Standard Error: 12_844 + .saturating_add(Weight::from_parts(87_187, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/schemas/src/weights.rs b/pallets/schemas/src/weights.rs index a64177a2bd..8db058944e 100644 --- a/pallets/schemas/src/weights.rs +++ b/pallets/schemas/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_schemas //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-d4nrm`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/schemas/src/weights.rs @@ -71,11 +71,11 @@ impl WeightInfo for SubstrateWeight { fn create_schema(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `10399` - // Minimum execution time: 15_733_000 picoseconds. - Weight::from_parts(16_003_000, 10399) - // Standard Error: 43 - .saturating_add(Weight::from_parts(34_311, 0).saturating_mul(m.into())) + // Estimated: `2974` + // Minimum execution time: 15_561_000 picoseconds. + Weight::from_parts(15_783_000, 2974) + // Standard Error: 49 + .saturating_add(Weight::from_parts(34_744, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -91,11 +91,11 @@ impl WeightInfo for SubstrateWeight { fn create_schema_via_governance(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `10399` - // Minimum execution time: 15_842_000 picoseconds. - Weight::from_parts(16_036_000, 10399) - // Standard Error: 47 - .saturating_add(Weight::from_parts(34_330, 0).saturating_mul(m.into())) + // Estimated: `2974` + // Minimum execution time: 15_899_000 picoseconds. + Weight::from_parts(16_004_000, 2974) + // Standard Error: 53 + .saturating_add(Weight::from_parts(34_628, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -113,11 +113,11 @@ impl WeightInfo for SubstrateWeight { fn propose_to_create_schema(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `230` - // Estimated: `12605` - // Minimum execution time: 21_487_000 picoseconds. - Weight::from_parts(11_162_870, 12605) - // Standard Error: 31 - .saturating_add(Weight::from_parts(3_121, 0).saturating_mul(m.into())) + // Estimated: `5180` + // Minimum execution time: 21_420_000 picoseconds. 
+ Weight::from_parts(10_115_148, 5180) + // Standard Error: 33 + .saturating_add(Weight::from_parts(3_133, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -133,11 +133,11 @@ impl WeightInfo for SubstrateWeight { fn create_schema_v2(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `10399` - // Minimum execution time: 15_775_000 picoseconds. - Weight::from_parts(15_884_000, 10399) - // Standard Error: 48 - .saturating_add(Weight::from_parts(34_322, 0).saturating_mul(m.into())) + // Estimated: `2974` + // Minimum execution time: 15_700_000 picoseconds. + Weight::from_parts(15_894_000, 2974) + // Standard Error: 46 + .saturating_add(Weight::from_parts(34_920, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -147,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_505_000 picoseconds. - Weight::from_parts(6_843_000, 0) + // Minimum execution time: 6_806_000 picoseconds. + Weight::from_parts(7_135_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -167,11 +167,11 @@ impl WeightInfo for () { fn create_schema(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `10399` - // Minimum execution time: 15_733_000 picoseconds. - Weight::from_parts(16_003_000, 10399) - // Standard Error: 43 - .saturating_add(Weight::from_parts(34_311, 0).saturating_mul(m.into())) + // Estimated: `2974` + // Minimum execution time: 15_561_000 picoseconds. + Weight::from_parts(15_783_000, 2974) + // Standard Error: 49 + .saturating_add(Weight::from_parts(34_744, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -187,11 +187,11 @@ impl WeightInfo for () { fn create_schema_via_governance(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `10399` - // Minimum execution time: 15_842_000 picoseconds. - Weight::from_parts(16_036_000, 10399) - // Standard Error: 47 - .saturating_add(Weight::from_parts(34_330, 0).saturating_mul(m.into())) + // Estimated: `2974` + // Minimum execution time: 15_899_000 picoseconds. + Weight::from_parts(16_004_000, 2974) + // Standard Error: 53 + .saturating_add(Weight::from_parts(34_628, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -209,11 +209,11 @@ impl WeightInfo for () { fn propose_to_create_schema(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `230` - // Estimated: `12605` - // Minimum execution time: 21_487_000 picoseconds. - Weight::from_parts(11_162_870, 12605) - // Standard Error: 31 - .saturating_add(Weight::from_parts(3_121, 0).saturating_mul(m.into())) + // Estimated: `5180` + // Minimum execution time: 21_420_000 picoseconds. + Weight::from_parts(10_115_148, 5180) + // Standard Error: 33 + .saturating_add(Weight::from_parts(3_133, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -229,11 +229,11 @@ impl WeightInfo for () { fn create_schema_v2(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `136` - // Estimated: `10399` - // Minimum execution time: 15_775_000 picoseconds. 
- Weight::from_parts(15_884_000, 10399) - // Standard Error: 48 - .saturating_add(Weight::from_parts(34_322, 0).saturating_mul(m.into())) + // Estimated: `2974` + // Minimum execution time: 15_700_000 picoseconds. + Weight::from_parts(15_894_000, 2974) + // Standard Error: 46 + .saturating_add(Weight::from_parts(34_920, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -243,8 +243,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_505_000 picoseconds. - Weight::from_parts(6_843_000, 0) + // Minimum execution time: 6_806_000 picoseconds. + Weight::from_parts(7_135_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/pallets/stateful-storage/src/benchmarking.rs b/pallets/stateful-storage/src/benchmarking.rs index 6abd5ce9aa..99a44899fb 100644 --- a/pallets/stateful-storage/src/benchmarking.rs +++ b/pallets/stateful-storage/src/benchmarking.rs @@ -80,15 +80,39 @@ fn get_paginated_page( } benchmarks! { - apply_item_actions { - let s in 1 .. (T::MaxItemizedBlobSizeBytes::get() * T::MaxItemizedActionsCount::get() + 1); + apply_item_actions_add { + let s in (T::MaxItemizedBlobSizeBytes::get()) .. (T::MaxItemizedBlobSizeBytes::get() * T::MaxItemizedActionsCount::get()); let provider_msa_id = 1u64; let delegator_msa_id = 2u64; let schema_id = constants::ITEMIZED_SCHEMA; let caller: T::AccountId = whitelisted_caller(); let num_of_items = s / T::MaxItemizedBlobSizeBytes::get(); - let num_of_existing_items = (T::MaxItemizedPageSizeBytes::get() / T::MaxItemizedBlobSizeBytes::get()) / 2; - let delete_actions = T::MaxItemizedActionsCount::get() - num_of_items; + let key = (schema_id,); + + T::SchemaBenchmarkHelper::set_schema_count(schema_id - 1); + assert_ok!(create_schema::(PayloadLocation::Itemized)); + assert_ok!(T::MsaBenchmarkHelper::add_key(provider_msa_id.into(), caller.clone())); + assert_ok!(T::MsaBenchmarkHelper::set_delegation_relationship(provider_msa_id.into(), delegator_msa_id.into(), [schema_id].to_vec())); + + let actions = itemized_actions_populate::(num_of_items, T::MaxItemizedBlobSizeBytes::get() as usize, 0); + }: { + assert_ok!(StatefulStoragePallet::::apply_item_actions(RawOrigin::Signed(caller).into(), delegator_msa_id.into(), schema_id, NONEXISTENT_PAGE_HASH, actions)); + } + verify { + let page_result = get_itemized_page::(delegator_msa_id, schema_id); + assert!(page_result.is_some()); + assert!(page_result.unwrap().data.len() > 0); + } + + apply_item_actions_delete { + let n in 1 .. T::MaxItemizedActionsCount::get(); + let provider_msa_id = 1u64; + let delegator_msa_id = 2u64; + let schema_id = constants::ITEMIZED_SCHEMA; + let caller: T::AccountId = whitelisted_caller(); + let num_of_items= n; + // removed 2 bytes are for ItemHeader size which is currently 2 bytes per item + let num_of_existing_items = T::MaxItemizedPageSizeBytes::get() / (T::MaxItemizedBlobSizeBytes::get() + 2); let key = (schema_id,); T::SchemaBenchmarkHelper::set_schema_count(schema_id - 1); @@ -111,8 +135,10 @@ benchmarks! 
{ PALLET_STORAGE_PREFIX, ITEMIZED_STORAGE_PREFIX, &key).unwrap().unwrap_or_default().get_hash(); - let actions = itemized_actions_populate::(num_of_items, T::MaxItemizedBlobSizeBytes::get() as usize, delete_actions); - }: _ (RawOrigin::Signed(caller), delegator_msa_id.into(), schema_id, content_hash, actions) + let actions = itemized_actions_populate::(0, 0, num_of_items); + }: { + assert_ok!(StatefulStoragePallet::::apply_item_actions(RawOrigin::Signed(caller).into(), delegator_msa_id.into(), schema_id, content_hash, actions)); + } verify { let page_result = get_itemized_page::(delegator_msa_id, schema_id); assert!(page_result.is_some()); @@ -126,14 +152,27 @@ benchmarks! { let page_id: PageId = 1; let schema_id = constants::PAGINATED_SCHEMA; let caller: T::AccountId = whitelisted_caller(); - let payload = vec![0u8; s as usize]; - let schema_key = schema_id.encode().to_vec(); + let payload = vec![1u8; s as usize]; + let max_payload = vec![1u8; T::MaxPaginatedPageSizeBytes::get() as usize]; + let page = PaginatedPage::::from(BoundedVec::try_from(max_payload).unwrap()); T::SchemaBenchmarkHelper::set_schema_count(schema_id - 1); assert_ok!(create_schema::(PayloadLocation::Paginated)); assert_ok!(T::MsaBenchmarkHelper::add_key(provider_msa_id.into(), caller.clone())); assert_ok!(T::MsaBenchmarkHelper::set_delegation_relationship(provider_msa_id.into(), delegator_msa_id.into(), [schema_id].to_vec())); - }: _(RawOrigin::Signed(caller), delegator_msa_id.into(), schema_id, page_id, NONEXISTENT_PAGE_HASH, payload.try_into().unwrap()) + + let key = (schema_id, page_id); + StatefulChildTree::::write(&delegator_msa_id, + PALLET_STORAGE_PREFIX, + PAGINATED_STORAGE_PREFIX, + &key, &page + ); + let content_hash = StatefulChildTree::::try_read::<_, PaginatedPage::>( + &delegator_msa_id, + PALLET_STORAGE_PREFIX, + PAGINATED_STORAGE_PREFIX, + &key).expect("error reading").expect("no data").get_hash(); + }: _(RawOrigin::Signed(caller), delegator_msa_id.into(), schema_id, page_id, content_hash, payload.try_into().unwrap()) verify { let page_result = get_paginated_page::(delegator_msa_id, schema_id, page_id); assert!(page_result.is_some()); @@ -170,15 +209,50 @@ benchmarks! { assert!(page_result.is_none()); } - apply_item_actions_with_signature { - let s in 1 .. (T::MaxItemizedBlobSizeBytes::get() * T::MaxItemizedActionsCount::get() + 1); - + apply_item_actions_with_signature_v2_add { + let s in (T::MaxItemizedBlobSizeBytes::get()) .. 
(T::MaxItemizedBlobSizeBytes::get() * T::MaxItemizedActionsCount::get()); let msa_id = 1u64; let schema_id = constants::ITEMIZED_SCHEMA; let caller: T::AccountId = whitelisted_caller(); let num_of_items = s / T::MaxItemizedBlobSizeBytes::get(); - let num_of_existing_items = (T::MaxItemizedPageSizeBytes::get() / T::MaxItemizedBlobSizeBytes::get()) / 2; - let delete_actions = T::MaxItemizedActionsCount::get() - num_of_items; + let key = (schema_id,); + let expiration = BlockNumberFor::::from(10u32); + + let delegator_account_public = SignerId::generate_pair(Some(constants::BENCHMARK_SIGNATURE_ACCOUNT_SEED.as_bytes().to_vec())); + let delegator_account = T::AccountId::decode(&mut &delegator_account_public.encode()[..]).unwrap(); + let delegator_msa_id = constants::SIGNATURE_MSA_ID; + + T::SchemaBenchmarkHelper::set_schema_count(schema_id - 1); + assert_ok!(create_schema::(PayloadLocation::Itemized)); + assert_ok!(T::MsaBenchmarkHelper::add_key(msa_id.into(), caller.clone())); + assert_ok!(T::MsaBenchmarkHelper::add_key(delegator_msa_id.into(), delegator_account.clone())); + assert_ok!(T::MsaBenchmarkHelper::set_delegation_relationship(msa_id.into(), delegator_msa_id.into(), [schema_id].to_vec())); + + let actions = itemized_actions_populate::(num_of_items, T::MaxItemizedBlobSizeBytes::get() as usize, 0); + let payload = ItemizedSignaturePayloadV2 { + actions, + target_hash: NONEXISTENT_PAGE_HASH, + expiration, + schema_id, + }; + let encode_data_new_key_data = wrap_binary_data(payload.encode()); + let signature = delegator_account_public.sign(&encode_data_new_key_data).unwrap(); + }: { + assert_ok!(StatefulStoragePallet::::apply_item_actions_with_signature_v2(RawOrigin::Signed(caller).into(), delegator_account.into(), MultiSignature::Sr25519(signature.into()), payload)); + } + verify { + let page_result = get_itemized_page::(delegator_msa_id, schema_id); + assert!(page_result.is_some()); + assert!(page_result.unwrap().data.len() > 0); + } + + apply_item_actions_with_signature_v2_delete { + let n in 1 .. T::MaxItemizedActionsCount::get(); + let msa_id = 1u64; + let schema_id = constants::ITEMIZED_SCHEMA; + let caller: T::AccountId = whitelisted_caller(); + let num_of_items = n; + let num_of_existing_items = T::MaxItemizedPageSizeBytes::get() / (T::MaxItemizedBlobSizeBytes::get() + 2); let key = (schema_id,); let expiration = BlockNumberFor::::from(10u32); @@ -207,24 +281,25 @@ benchmarks! 
{ PALLET_STORAGE_PREFIX, ITEMIZED_STORAGE_PREFIX, &key).unwrap().unwrap_or_default().get_hash(); - let actions = itemized_actions_populate::(num_of_items, T::MaxItemizedBlobSizeBytes::get() as usize, delete_actions); - let payload = ItemizedSignaturePayload { + let actions = itemized_actions_populate::(0, 0, num_of_items); + let payload = ItemizedSignaturePayloadV2 { actions, target_hash: content_hash, - msa_id: delegator_msa_id, expiration, schema_id, }; let encode_data_new_key_data = wrap_binary_data(payload.encode()); let signature = delegator_account_public.sign(&encode_data_new_key_data).unwrap(); - }: _ (RawOrigin::Signed(caller), delegator_account.into(), MultiSignature::Sr25519(signature.into()), payload) + }: { + assert_ok!(StatefulStoragePallet::::apply_item_actions_with_signature_v2(RawOrigin::Signed(caller).into(), delegator_account.into(), MultiSignature::Sr25519(signature.into()), payload)); + } verify { let page_result = get_itemized_page::(delegator_msa_id, schema_id); assert!(page_result.is_some()); assert!(page_result.unwrap().data.len() > 0); } - upsert_page_with_signature { + upsert_page_with_signature_v2 { let s in 1 .. T::MaxPaginatedPageSizeBytes::get(); let provider_msa_id = 1u64; @@ -233,6 +308,8 @@ benchmarks! { let schema_id = constants::PAGINATED_SCHEMA; let caller: T::AccountId = whitelisted_caller(); let payload = vec![0u8; s as usize]; + let max_payload = vec![1u8; T::MaxPaginatedPageSizeBytes::get() as usize]; + let page = PaginatedPage::::from(BoundedVec::try_from(max_payload).unwrap()); let schema_key = schema_id.encode().to_vec(); let expiration = BlockNumberFor::::from(10u32); @@ -244,10 +321,21 @@ benchmarks! { assert_ok!(create_schema::(PayloadLocation::Paginated)); assert_ok!(T::MsaBenchmarkHelper::add_key(delegator_msa_id.into(), delegator_account.clone())); - let payload = PaginatedUpsertSignaturePayload { + let key = (schema_id, page_id); + StatefulChildTree::::write(&delegator_msa_id, + PALLET_STORAGE_PREFIX, + PAGINATED_STORAGE_PREFIX, + &key, &page + ); + let content_hash = StatefulChildTree::::try_read::<_, PaginatedPage::>( + &delegator_msa_id, + PALLET_STORAGE_PREFIX, + PAGINATED_STORAGE_PREFIX, + &key).expect("error reading").expect("no data").get_hash(); + + let payload = PaginatedUpsertSignaturePayloadV2 { payload: BoundedVec::try_from(payload).unwrap(), - target_hash: PageHash::default(), - msa_id: delegator_msa_id, + target_hash: content_hash, expiration, schema_id, page_id, @@ -261,7 +349,7 @@ benchmarks! { assert!(page_result.unwrap().data.len() > 0); } - delete_page_with_signature { + delete_page_with_signature_v2 { let provider_msa_id = 1u64; let delegator_msa_id = 2u64; let schema_id = constants::PAGINATED_SCHEMA; @@ -292,9 +380,8 @@ benchmarks! 
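The payload swap above is the essence of the v1 → v2 signature extrinsics: `msa_id` disappears from the signed payload because the chain can look it up from the `delegator_key` it already receives (the weight annotations below show the extra `Msa::PublicKeyToMsaId` read). Roughly, with field types simplified and the aliases assumed for illustration; only the field lists come from the patch:

```rust
// Type aliases assumed for illustration; the real definitions live in common primitives.
type MessageSourceId = u64;
type SchemaId = u16;
type PageHash = u32;

/// v1 payload: the delegator's MSA id travels inside the signed bytes.
pub struct ItemizedSignaturePayload<Actions, BlockNumber> {
    pub actions: Actions,
    pub target_hash: PageHash,
    pub msa_id: MessageSourceId, // removed in v2
    pub expiration: BlockNumber,
    pub schema_id: SchemaId,
}

/// v2 payload: identical minus `msa_id`, which is resolved from `delegator_key` on chain.
pub struct ItemizedSignaturePayloadV2<Actions, BlockNumber> {
    pub actions: Actions,
    pub target_hash: PageHash,
    pub expiration: BlockNumber,
    pub schema_id: SchemaId,
}
```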
{
 			PAGINATED_STORAGE_PREFIX,
 			&key).unwrap().unwrap().get_hash();
 
-		let payload = PaginatedDeleteSignaturePayload {
+		let payload = PaginatedDeleteSignaturePayloadV2 {
 			target_hash: content_hash,
-			msa_id: delegator_msa_id,
 			expiration,
 			schema_id,
 			page_id,
diff --git a/pallets/stateful-storage/src/lib.rs b/pallets/stateful-storage/src/lib.rs
index 4c3dd6a513..d94107e20d 100644
--- a/pallets/stateful-storage/src/lib.rs
+++ b/pallets/stateful-storage/src/lib.rs
@@ -257,7 +257,10 @@ pub mod pallet {
 		/// * [`Event::ItemizedPageDeleted`]
 		///
 		#[pallet::call_index(0)]
-		#[pallet::weight(T::WeightInfo::apply_item_actions(Pallet::<T>::sum_add_actions_bytes(actions)))]
+		#[pallet::weight(
+			T::WeightInfo::apply_item_actions_delete(actions.len() as u32)
+			.max(T::WeightInfo::apply_item_actions_add(Pallet::<T>::sum_add_actions_bytes(actions)))
+		)]
 		pub fn apply_item_actions(
 			origin: OriginFor<T>,
 			#[pallet::compact] state_owner_msa_id: MessageSourceId,
@@ -336,9 +339,10 @@ pub mod pallet {
 		/// * [`Event::ItemizedPageDeleted`]
 		///
 		#[pallet::call_index(3)]
-		#[pallet::weight(T::WeightInfo::apply_item_actions_with_signature(
-			Pallet::<T>::sum_add_actions_bytes(&payload.actions)
-		))]
+		#[pallet::weight(
+			T::WeightInfo::apply_item_actions_with_signature_v2_delete(payload.actions.len() as u32)
+			.max(T::WeightInfo::apply_item_actions_with_signature_v2_add(Pallet::<T>::sum_add_actions_bytes(&payload.actions)))
+		)]
 		#[allow(deprecated)]
 		#[deprecated(note = "please use `apply_item_actions_with_signature_v2` instead")]
 		pub fn apply_item_actions_with_signature(
@@ -378,7 +382,7 @@ pub mod pallet {
 		/// * [`Event::PaginatedPageUpdated`]
 		///
 		#[pallet::call_index(4)]
-		#[pallet::weight(T::WeightInfo::upsert_page_with_signature(payload.payload.len() as u32))]
+		#[pallet::weight(T::WeightInfo::upsert_page_with_signature_v2(payload.payload.len() as u32))]
 		#[allow(deprecated)]
 		#[deprecated(note = "please use `upsert_page_with_signature_v2` instead")]
 		pub fn upsert_page_with_signature(
@@ -421,7 +425,7 @@ pub mod pallet {
 		/// * [`Event::PaginatedPageDeleted`]
 		///
 		#[pallet::call_index(5)]
-		#[pallet::weight(T::WeightInfo::delete_page_with_signature())]
+		#[pallet::weight(T::WeightInfo::delete_page_with_signature_v2())]
 		#[allow(deprecated)]
 		#[deprecated(note = "please use `delete_page_with_signature_v2` instead")]
 		pub fn delete_page_with_signature(
@@ -465,9 +469,10 @@ pub mod pallet {
 		/// * [`Event::ItemizedPageDeleted`]
 		///
 		#[pallet::call_index(6)]
-		#[pallet::weight(T::WeightInfo::apply_item_actions_with_signature(
-			Pallet::<T>::sum_add_actions_bytes(&payload.actions)
-		))]
+		#[pallet::weight(
+			T::WeightInfo::apply_item_actions_with_signature_v2_delete(payload.actions.len() as u32)
+			.max(T::WeightInfo::apply_item_actions_with_signature_v2_add(Pallet::<T>::sum_add_actions_bytes(&payload.actions)))
+		)]
 		pub fn apply_item_actions_with_signature_v2(
 			origin: OriginFor<T>,
 			delegator_key: T::AccountId,
@@ -506,7 +511,7 @@ pub mod pallet {
 		/// * [`Event::PaginatedPageUpdated`]
 		///
 		#[pallet::call_index(7)]
-		#[pallet::weight(T::WeightInfo::upsert_page_with_signature(payload.payload.len() as u32))]
+		#[pallet::weight(T::WeightInfo::upsert_page_with_signature_v2(payload.payload.len() as u32))]
 		pub fn upsert_page_with_signature_v2(
 			origin: OriginFor<T>,
 			delegator_key: T::AccountId,
@@ -548,7 +553,7 @@ pub mod pallet {
 		/// * [`Event::PaginatedPageDeleted`]
 		///
 		#[pallet::call_index(8)]
-		#[pallet::weight(T::WeightInfo::delete_page_with_signature())]
+		#[pallet::weight(T::WeightInfo::delete_page_with_signature_v2())]
 		pub fn delete_page_with_signature_v2(
origin: OriginFor, delegator_key: T::AccountId, diff --git a/pallets/stateful-storage/src/weights.rs b/pallets/stateful-storage/src/weights.rs index 146c672dd5..cce563ef8e 100644 --- a/pallets/stateful-storage/src/weights.rs +++ b/pallets/stateful-storage/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_stateful_storage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-d4nrm`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/stateful-storage/src/weights.rs @@ -49,12 +49,14 @@ use core::marker::PhantomData; /// Weight functions needed for pallet_stateful_storage. pub trait WeightInfo { - fn apply_item_actions(s: u32, ) -> Weight; + fn apply_item_actions_add(s: u32, ) -> Weight; + fn apply_item_actions_delete(n: u32, ) -> Weight; fn upsert_page(s: u32, ) -> Weight; fn delete_page() -> Weight; - fn apply_item_actions_with_signature(s: u32, ) -> Weight; - fn upsert_page_with_signature(s: u32, ) -> Weight; - fn delete_page_with_signature() -> Weight; + fn apply_item_actions_with_signature_v2_add(s: u32, ) -> Weight; + fn apply_item_actions_with_signature_v2_delete(n: u32, ) -> Weight; + fn upsert_page_with_signature_v2(s: u32, ) -> Weight; + fn delete_page_with_signature_v2() -> Weight; } /// Weights for pallet_stateful_storage using the Substrate node and recommended hardware. @@ -68,15 +70,33 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) - /// The range of component `s` is `[1, 5121]`. - fn apply_item_actions(s: u32, ) -> Weight { + /// The range of component `s` is `[1024, 5120]`. + fn apply_item_actions_add(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33346` - // Estimated: `45721` - // Minimum execution time: 97_596_000 picoseconds. - Weight::from_parts(95_196_204, 45721) - // Standard Error: 469 - .saturating_add(Weight::from_parts(7_119, 0).saturating_mul(s.into())) + // Measured: `392` + // Estimated: `5342` + // Minimum execution time: 29_588_000 picoseconds. 
+ Weight::from_parts(30_954_038, 5342) + // Standard Error: 223 + .saturating_add(Weight::from_parts(669, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) + /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// The range of component `n` is `[1, 5]`. + fn apply_item_actions_delete(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `10770` + // Estimated: `15720` + // Minimum execution time: 45_167_000 picoseconds. + Weight::from_parts(62_290_393, 15720) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -91,12 +111,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 1024]`. fn upsert_page(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392` - // Estimated: `12767` - // Minimum execution time: 29_612_000 picoseconds. - Weight::from_parts(31_961_486, 12767) - // Standard Error: 972 - .saturating_add(Weight::from_parts(596, 0).saturating_mul(s.into())) + // Measured: `1553` + // Estimated: `6503` + // Minimum execution time: 33_775_000 picoseconds. + Weight::from_parts(35_152_064, 6503) + // Standard Error: 166 + .saturating_add(Weight::from_parts(571, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -111,9 +131,9 @@ impl WeightInfo for SubstrateWeight { fn delete_page() -> Weight { // Proof Size summary in bytes: // Measured: `1551` - // Estimated: `13926` - // Minimum execution time: 32_983_000 picoseconds. - Weight::from_parts(33_821_000, 13926) + // Estimated: `6501` + // Minimum execution time: 32_232_000 picoseconds. + Weight::from_parts(34_067_000, 6501) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -123,15 +143,31 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) - /// The range of component `s` is `[1, 5121]`. - fn apply_item_actions_with_signature(s: u32, ) -> Weight { + /// The range of component `s` is `[1024, 5120]`. + fn apply_item_actions_with_signature_v2_add(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `5349` + // Minimum execution time: 89_671_000 picoseconds. 
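Because a single call can mix `Add` and `Delete` actions, the extrinsic weight in `lib.rs` is taken as the maximum of the two benchmarked paths above. A rough pre-dispatch sketch using the `()` impl (the function name is illustrative, and the crate import path is assumed):

```rust
use frame_support::weights::Weight;
use pallet_stateful_storage::weights::WeightInfo;

/// Charge the worse of the two benchmarked paths, mirroring the
/// `#[pallet::weight(...)]` expression introduced in lib.rs.
fn apply_item_actions_estimate(delete_actions: u32, added_bytes: u32) -> Weight {
    <() as WeightInfo>::apply_item_actions_delete(delete_actions)
        .max(<() as WeightInfo>::apply_item_actions_add(added_bytes))
}
```

With the numbers generated here, the delete branch (~62.3M ps and 15_720 bytes of PoV) exceeds the add branch even at its largest input (~30.95M + 669 × 5_120 ≈ 34.4M ps and 5_342 bytes), so the max resolves to the delete estimate; both branches add the same four reads and one write, so those cancel out of the comparison.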
+ Weight::from_parts(82_650_215, 5349) + // Standard Error: 174 + .saturating_add(Weight::from_parts(6_392, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// The range of component `n` is `[1, 5]`. + fn apply_item_actions_with_signature_v2_delete(_n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33353` - // Estimated: `45728` - // Minimum execution time: 155_737_000 picoseconds. - Weight::from_parts(153_426_516, 45728) - // Standard Error: 639 - .saturating_add(Weight::from_parts(12_619, 0).saturating_mul(s.into())) + // Measured: `10777` + // Estimated: `15727` + // Minimum execution time: 99_857_000 picoseconds. + Weight::from_parts(104_741_500, 15727) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -142,14 +178,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// The range of component `s` is `[1, 1024]`. - fn upsert_page_with_signature(s: u32, ) -> Weight { + fn upsert_page_with_signature_v2(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325` - // Estimated: `12700` - // Minimum execution time: 83_880_000 picoseconds. - Weight::from_parts(85_475_027, 12700) - // Standard Error: 357 - .saturating_add(Weight::from_parts(5_950, 0).saturating_mul(s.into())) + // Measured: `1486` + // Estimated: `6436` + // Minimum execution time: 86_886_000 picoseconds. + Weight::from_parts(89_336_870, 6436) + // Standard Error: 474 + .saturating_add(Weight::from_parts(6_568, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -159,12 +195,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) - fn delete_page_with_signature() -> Weight { + fn delete_page_with_signature_v2() -> Weight { // Proof Size summary in bytes: // Measured: `1484` - // Estimated: `13859` - // Minimum execution time: 87_068_000 picoseconds. - Weight::from_parts(87_902_000, 13859) + // Estimated: `6434` + // Minimum execution time: 86_921_000 picoseconds. 
+ Weight::from_parts(88_759_000, 6434) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,15 +216,33 @@ impl WeightInfo for () { /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) - /// The range of component `s` is `[1, 5121]`. - fn apply_item_actions(s: u32, ) -> Weight { + /// The range of component `s` is `[1024, 5120]`. + fn apply_item_actions_add(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33346` - // Estimated: `45721` - // Minimum execution time: 97_596_000 picoseconds. - Weight::from_parts(95_196_204, 45721) - // Standard Error: 469 - .saturating_add(Weight::from_parts(7_119, 0).saturating_mul(s.into())) + // Measured: `392` + // Estimated: `5342` + // Minimum execution time: 29_588_000 picoseconds. + Weight::from_parts(30_954_038, 5342) + // Standard Error: 223 + .saturating_add(Weight::from_parts(669, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Msa::DelegatorAndProviderToDelegation` (r:1 w:0) + /// Proof: `Msa::DelegatorAndProviderToDelegation` (`max_values`: None, `max_size`: Some(217), added: 2692, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// The range of component `n` is `[1, 5]`. + fn apply_item_actions_delete(_n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `10770` + // Estimated: `15720` + // Minimum execution time: 45_167_000 picoseconds. + Weight::from_parts(62_290_393, 15720) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -203,12 +257,12 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 1024]`. fn upsert_page(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392` - // Estimated: `12767` - // Minimum execution time: 29_612_000 picoseconds. - Weight::from_parts(31_961_486, 12767) - // Standard Error: 972 - .saturating_add(Weight::from_parts(596, 0).saturating_mul(s.into())) + // Measured: `1553` + // Estimated: `6503` + // Minimum execution time: 33_775_000 picoseconds. + Weight::from_parts(35_152_064, 6503) + // Standard Error: 166 + .saturating_add(Weight::from_parts(571, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -223,9 +277,9 @@ impl WeightInfo for () { fn delete_page() -> Weight { // Proof Size summary in bytes: // Measured: `1551` - // Estimated: `13926` - // Minimum execution time: 32_983_000 picoseconds. - Weight::from_parts(33_821_000, 13926) + // Estimated: `6501` + // Minimum execution time: 32_232_000 picoseconds. 
+ Weight::from_parts(34_067_000, 6501) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -235,15 +289,31 @@ impl WeightInfo for () { /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) - /// The range of component `s` is `[1, 5121]`. - fn apply_item_actions_with_signature(s: u32, ) -> Weight { + /// The range of component `s` is `[1024, 5120]`. + fn apply_item_actions_with_signature_v2_add(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `5349` + // Minimum execution time: 89_671_000 picoseconds. + Weight::from_parts(82_650_215, 5349) + // Standard Error: 174 + .saturating_add(Weight::from_parts(6_392, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Msa::PublicKeyToMsaId` (r:1 w:0) + /// Proof: `Msa::PublicKeyToMsaId` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xbd1557c8db6bd8599a811a7175fbc2fc6400` (r:1 w:1) + /// The range of component `n` is `[1, 5]`. + fn apply_item_actions_with_signature_v2_delete(_n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `33353` - // Estimated: `45728` - // Minimum execution time: 155_737_000 picoseconds. - Weight::from_parts(153_426_516, 45728) - // Standard Error: 639 - .saturating_add(Weight::from_parts(12_619, 0).saturating_mul(s.into())) + // Measured: `10777` + // Estimated: `15727` + // Minimum execution time: 99_857_000 picoseconds. + Weight::from_parts(104_741_500, 15727) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -254,14 +324,14 @@ impl WeightInfo for () { /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// The range of component `s` is `[1, 1024]`. - fn upsert_page_with_signature(s: u32, ) -> Weight { + fn upsert_page_with_signature_v2(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `325` - // Estimated: `12700` - // Minimum execution time: 83_880_000 picoseconds. - Weight::from_parts(85_475_027, 12700) - // Standard Error: 357 - .saturating_add(Weight::from_parts(5_950, 0).saturating_mul(s.into())) + // Measured: `1486` + // Estimated: `6436` + // Minimum execution time: 86_886_000 picoseconds. 
+ Weight::from_parts(89_336_870, 6436) + // Standard Error: 474 + .saturating_add(Weight::from_parts(6_568, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -271,12 +341,12 @@ impl WeightInfo for () { /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) /// Proof: UNKNOWN KEY `0x0763c98381dc89abe38627fe2f98cb7af1577fbf1d628fdddb4ebfc6e8d95fb1` (r:1 w:1) - fn delete_page_with_signature() -> Weight { + fn delete_page_with_signature_v2() -> Weight { // Proof Size summary in bytes: // Measured: `1484` - // Estimated: `13859` - // Minimum execution time: 87_068_000 picoseconds. - Weight::from_parts(87_902_000, 13859) + // Estimated: `6434` + // Minimum execution time: 86_921_000 picoseconds. + Weight::from_parts(88_759_000, 6434) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/pallets/time-release/src/weights.rs b/pallets/time-release/src/weights.rs index 24c5e004a2..908ea742ce 100644 --- a/pallets/time-release/src/weights.rs +++ b/pallets/time-release/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_time_release //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-bw25f`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -33,7 +33,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=20 // --repeat=10 // --output=./scripts/../pallets/time-release/src/weights.rs @@ -70,9 +70,9 @@ impl WeightInfo for SubstrateWeight { fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `13824` - // Minimum execution time: 32_533_000 picoseconds. - Weight::from_parts(33_063_000, 13824) + // Estimated: `6399` + // Minimum execution time: 32_236_000 picoseconds. + Weight::from_parts(33_078_000, 6399) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -85,12 +85,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// The range of component `i` is `[1, 50]`. - fn claim(_i: u32, ) -> Weight { + fn claim(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `268` - // Estimated: `13824` - // Minimum execution time: 32_645_000 picoseconds. - Weight::from_parts(34_060_365, 13824) + // Estimated: `6399` + // Minimum execution time: 32_684_000 picoseconds. 
+ Weight::from_parts(33_126_084, 6399) + // Standard Error: 30_315 + .saturating_add(Weight::from_parts(90_315, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -106,11 +108,11 @@ impl WeightInfo for SubstrateWeight { fn update_release_schedules(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `39` - // Estimated: `13674` - // Minimum execution time: 27_332_000 picoseconds. - Weight::from_parts(28_463_470, 13674) - // Standard Error: 2_264 - .saturating_add(Weight::from_parts(48_163, 0).saturating_mul(i.into())) + // Estimated: `6249` + // Minimum execution time: 27_164_000 picoseconds. + Weight::from_parts(28_903_937, 6249) + // Standard Error: 12_586 + .saturating_add(Weight::from_parts(26_982, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -131,9 +133,9 @@ impl WeightInfo for () { fn transfer() -> Weight { // Proof Size summary in bytes: // Measured: `113` - // Estimated: `13824` - // Minimum execution time: 32_533_000 picoseconds. - Weight::from_parts(33_063_000, 13824) + // Estimated: `6399` + // Minimum execution time: 32_236_000 picoseconds. + Weight::from_parts(33_078_000, 6399) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -146,12 +148,14 @@ impl WeightInfo for () { /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// The range of component `i` is `[1, 50]`. - fn claim(_i: u32, ) -> Weight { + fn claim(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `268` - // Estimated: `13824` - // Minimum execution time: 32_645_000 picoseconds. - Weight::from_parts(34_060_365, 13824) + // Estimated: `6399` + // Minimum execution time: 32_684_000 picoseconds. + Weight::from_parts(33_126_084, 6399) + // Standard Error: 30_315 + .saturating_add(Weight::from_parts(90_315, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -167,11 +171,11 @@ impl WeightInfo for () { fn update_release_schedules(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `39` - // Estimated: `13674` - // Minimum execution time: 27_332_000 picoseconds. - Weight::from_parts(28_463_470, 13674) - // Standard Error: 2_264 - .saturating_add(Weight::from_parts(48_163, 0).saturating_mul(i.into())) + // Estimated: `6249` + // Minimum execution time: 27_164_000 picoseconds. + Weight::from_parts(28_903_937, 6249) + // Standard Error: 12_586 + .saturating_add(Weight::from_parts(26_982, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } diff --git a/runtime/common/src/constants.rs b/runtime/common/src/constants.rs index a4439c1743..c039b939fb 100644 --- a/runtime/common/src/constants.rs +++ b/runtime/common/src/constants.rs @@ -306,12 +306,13 @@ parameter_types! { // --- Stateful Storage Pallet --- // Needs parameter_types! for the impls below parameter_types! 
{ - /// The maximum size of a page (in bytes) for an Itemized storage model (64KB) - pub const MaxItemizedPageSizeBytes: u32 = 64 * 1024; - /// The maximum size of a page (in bytes) for a Paginated storage model (1KB) - pub const MaxPaginatedPageSizeBytes: u32 = 1 * 1024; /// The maximum size of a single item in an itemized storage model (in bytes) pub const MaxItemizedBlobSizeBytes: u32 = 1024; + /// The maximum size of a page (in bytes) for an Itemized storage model ~ (10KiB) + /// extra 2 bytes is for ItemHeader which enables us to simulate max PoV in benchmarks + pub const MaxItemizedPageSizeBytes: u32 = 10 * (1024 + 2); + /// The maximum size of a page (in bytes) for a Paginated storage model (1KiB) + pub const MaxPaginatedPageSizeBytes: u32 = 1 * 1024; } /// The maximum number of pages in a Paginated storage model pub type MaxPaginatedPageId = ConstU16<16>; diff --git a/runtime/common/src/weights/block_weights.rs b/runtime/common/src/weights/block_weights.rs index 421618ae05..4d921caac1 100644 --- a/runtime/common/src/weights/block_weights.rs +++ b/runtime/common/src/weights/block_weights.rs @@ -16,8 +16,8 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30 (Y/M/D) -//! HOSTNAME: `benchmark-runner-44wtw-5mvh7`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! DATE: 2023-11-14 (Y/M/D) +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Frequency Development (No Relay)` //! WARMUPS: `10`, REPEAT: `100` @@ -43,17 +43,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 375_451, 412_723 - /// Average: 385_457 - /// Median: 382_225 - /// Std-Dev: 8091.57 + /// Min, Max: 372_624, 417_634 + /// Average: 381_582 + /// Median: 377_949 + /// Std-Dev: 9353.25 /// /// Percentiles nanoseconds: - /// 99th: 408_813 - /// 95th: 401_245 - /// 75th: 388_447 + /// 99th: 411_640 + /// 95th: 404_686 + /// 75th: 382_129 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(385_457), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(381_582), 0); } #[cfg(test)] diff --git a/runtime/common/src/weights/extrinsic_weights.rs b/runtime/common/src/weights/extrinsic_weights.rs index caa8392903..f9f5e4274d 100644 --- a/runtime/common/src/weights/extrinsic_weights.rs +++ b/runtime/common/src/weights/extrinsic_weights.rs @@ -16,8 +16,8 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-30 (Y/M/D) -//! HOSTNAME: `benchmark-runner-44wtw-5mvh7`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! DATE: 2023-11-14 (Y/M/D) +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Frequency Development (No Relay)` //! WARMUPS: `10`, REPEAT: `100` @@ -43,17 +43,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 100_113, 110_777 - /// Average: 101_295 - /// Median: 101_040 - /// Std-Dev: 1335.33 + /// Min, Max: 99_285, 113_950 + /// Average: 100_397 + /// Median: 99_930 + /// Std-Dev: 2074.96 /// /// Percentiles nanoseconds: - /// 99th: 106_051 - /// 95th: 102_810 - /// 75th: 101_357 + /// 99th: 112_460 + /// 95th: 104_093 + /// 75th: 100_221 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(101_295), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(100_397), 0); } #[cfg(test)] diff --git a/runtime/common/src/weights/pallet_balances.rs b/runtime/common/src/weights/pallet_balances.rs index 6141f9323a..4000a26c80 100644 --- a/runtime/common/src/weights/pallet_balances.rs +++ b/runtime/common/src/weights/pallet_balances.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -38,9 +38,9 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn transfer_allow_death() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `12503` - // Minimum execution time: 54_053_000 picoseconds. - Weight::from_parts(55_238_000, 12503) + // Estimated: `5078` + // Minimum execution time: 53_239_000 picoseconds. + Weight::from_parts(54_516_000, 5078) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -49,9 +49,9 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn transfer_keep_alive() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `12503` - // Minimum execution time: 40_987_000 picoseconds. - Weight::from_parts(42_180_000, 12503) + // Estimated: `5078` + // Minimum execution time: 40_630_000 picoseconds. + Weight::from_parts(41_261_000, 5078) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -60,9 +60,9 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn force_set_balance_creating() -> Weight { // Proof Size summary in bytes: // Measured: `207` - // Estimated: `12503` - // Minimum execution time: 14_866_000 picoseconds. - Weight::from_parts(15_328_000, 12503) + // Estimated: `5078` + // Minimum execution time: 14_679_000 picoseconds. + Weight::from_parts(15_055_000, 5078) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -71,9 +71,9 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn force_set_balance_killing() -> Weight { // Proof Size summary in bytes: // Measured: `207` - // Estimated: `12503` - // Minimum execution time: 21_650_000 picoseconds. 
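Both base weights above are derived the same way: the benchmarked average in nanoseconds is scaled to picosecond `ref_time`, with no PoV component. A worked check of the new values, assuming the standard `WEIGHT_REF_TIME_PER_NANOS = 1_000`:

```rust
use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight};

// Average block execution: 381_582 ns -> 381_582_000 ps of ref_time, zero PoV (~0.38 ms).
const BLOCK_EXECUTION_WEIGHT: Weight =
    Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(381_582), 0);

// Average per-extrinsic overhead: 100_397 ns -> 100_397_000 ps of ref_time (~0.10 ms).
const EXTRINSIC_BASE_WEIGHT: Weight =
    Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(100_397), 0);
```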
- Weight::from_parts(22_472_000, 12503) + // Estimated: `5078` + // Minimum execution time: 21_957_000 picoseconds. + Weight::from_parts(22_664_000, 5078) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -82,9 +82,9 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn force_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `103` - // Estimated: `15106` - // Minimum execution time: 55_888_000 picoseconds. - Weight::from_parts(56_759_000, 15106) + // Estimated: `7681` + // Minimum execution time: 55_803_000 picoseconds. + Weight::from_parts(56_838_000, 7681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -93,9 +93,9 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn transfer_all() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `12503` - // Minimum execution time: 51_363_000 picoseconds. - Weight::from_parts(53_377_000, 12503) + // Estimated: `5078` + // Minimum execution time: 50_414_000 picoseconds. + Weight::from_parts(51_285_000, 5078) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -104,9 +104,9 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn force_unreserve() -> Weight { // Proof Size summary in bytes: // Measured: `207` - // Estimated: `12503` - // Minimum execution time: 17_681_000 picoseconds. - Weight::from_parts(18_345_000, 12503) + // Estimated: `5078` + // Minimum execution time: 17_592_000 picoseconds. + Weight::from_parts(18_094_000, 5078) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -116,11 +116,11 @@ impl pallet_balances::WeightInfo for SubstrateWeight fn upgrade_accounts(u: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + u * (136 ±0)` - // Estimated: `9900 + u * (2603 ±0)` - // Minimum execution time: 17_442_000 picoseconds. - Weight::from_parts(17_809_000, 9900) - // Standard Error: 15_946 - .saturating_add(Weight::from_parts(15_265_828, 0).saturating_mul(u.into())) + // Estimated: `2475 + u * (2603 ±0)` + // Minimum execution time: 17_106_000 picoseconds. + Weight::from_parts(17_400_000, 2475) + // Standard Error: 9_825 + .saturating_add(Weight::from_parts(14_758_712, 0).saturating_mul(u.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) diff --git a/runtime/common/src/weights/pallet_collator_selection.rs b/runtime/common/src/weights/pallet_collator_selection.rs index d25f683387..3f401bfc4b 100644 --- a/runtime/common/src/weights/pallet_collator_selection.rs +++ b/runtime/common/src/weights/pallet_collator_selection.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_collator_selection //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -41,11 +41,11 @@ impl pallet_collator_selection::WeightInfo for Substrat fn set_invulnerables(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `334 + b * (79 ±0)` - // Estimated: `10234 + b * (2554 ±0)` - // Minimum execution time: 12_484_000 picoseconds. - Weight::from_parts(11_289_231, 10234) - // Standard Error: 9_450 - .saturating_add(Weight::from_parts(3_162_983, 0).saturating_mul(b.into())) + // Estimated: `2809 + b * (2554 ±0)` + // Minimum execution time: 13_060_000 picoseconds. + Weight::from_parts(11_639_631, 2809) + // Standard Error: 9_031 + .saturating_add(Weight::from_parts(3_192_959, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 2554).saturating_mul(b.into())) @@ -63,13 +63,13 @@ impl pallet_collator_selection::WeightInfo for Substrat fn add_invulnerable(b: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1049 + b * (32 ±0) + c * (51 ±0)` - // Estimated: `13369 + b * (29 ±0) + c * (53 ±0)` - // Minimum execution time: 47_435_000 picoseconds. - Weight::from_parts(48_062_266, 13369) - // Standard Error: 10_786 - .saturating_add(Weight::from_parts(89_583, 0).saturating_mul(b.into())) - // Standard Error: 3_198 - .saturating_add(Weight::from_parts(220_467, 0).saturating_mul(c.into())) + // Estimated: `5944 + b * (29 ±0) + c * (53 ±0)` + // Minimum execution time: 48_721_000 picoseconds. + Weight::from_parts(48_781_078, 5944) + // Standard Error: 5_733 + .saturating_add(Weight::from_parts(59_914, 0).saturating_mul(b.into())) + // Standard Error: 1_700 + .saturating_add(Weight::from_parts(203_482, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 29).saturating_mul(b.into())) @@ -83,11 +83,11 @@ impl pallet_collator_selection::WeightInfo for Substrat fn remove_invulnerable(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `82 + b * (32 ±0)` - // Estimated: `12796` - // Minimum execution time: 13_013_000 picoseconds. - Weight::from_parts(13_390_102, 12796) - // Standard Error: 2_907 - .saturating_add(Weight::from_parts(173_711, 0).saturating_mul(b.into())) + // Estimated: `5371` + // Minimum execution time: 13_299_000 picoseconds. + Weight::from_parts(13_607_733, 5371) + // Standard Error: 2_892 + .saturating_add(Weight::from_parts(213_138, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -97,8 +97,8 @@ impl pallet_collator_selection::WeightInfo for Substrat // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_877_000 picoseconds. - Weight::from_parts(6_204_000, 0) + // Minimum execution time: 6_104_000 picoseconds. 
+ Weight::from_parts(6_343_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) @@ -107,8 +107,8 @@ impl pallet_collator_selection::WeightInfo for Substrat // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_045_000 picoseconds. - Weight::from_parts(6_354_000, 0) + // Minimum execution time: 6_212_000 picoseconds. + Weight::from_parts(6_484_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CollatorSelection::Candidates` (r:1 w:1) @@ -127,11 +127,11 @@ impl pallet_collator_selection::WeightInfo for Substrat fn register_as_candidate(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `752 + c * (57 ±0)` - // Estimated: `13131 + c * (57 ±0)` - // Minimum execution time: 42_390_000 picoseconds. - Weight::from_parts(44_404_164, 13131) - // Standard Error: 3_330 - .saturating_add(Weight::from_parts(319_731, 0).saturating_mul(c.into())) + // Estimated: `5706 + c * (57 ±0)` + // Minimum execution time: 42_281_000 picoseconds. + Weight::from_parts(45_186_285, 5706) + // Standard Error: 3_760 + .saturating_add(Weight::from_parts(268_788, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 57).saturating_mul(c.into())) @@ -146,11 +146,11 @@ impl pallet_collator_selection::WeightInfo for Substrat fn leave_intent(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `481 + c * (49 ±0)` - // Estimated: `12796` - // Minimum execution time: 30_531_000 picoseconds. - Weight::from_parts(34_110_058, 12796) - // Standard Error: 3_493 - .saturating_add(Weight::from_parts(276_667, 0).saturating_mul(c.into())) + // Estimated: `5371` + // Minimum execution time: 30_456_000 picoseconds. + Weight::from_parts(33_850_912, 5371) + // Standard Error: 2_883 + .saturating_add(Weight::from_parts(219_353, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -161,9 +161,9 @@ impl pallet_collator_selection::WeightInfo for Substrat fn note_author() -> Weight { // Proof Size summary in bytes: // Measured: `212` - // Estimated: `15106` - // Minimum execution time: 46_587_000 picoseconds. - Weight::from_parts(48_104_000, 15106) + // Estimated: `7681` + // Minimum execution time: 46_047_000 picoseconds. + Weight::from_parts(46_687_000, 7681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -180,11 +180,11 @@ impl pallet_collator_selection::WeightInfo for Substrat fn new_session(r: u32, c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `1213 + c * (98 ±0) + r * (126 ±0)` - // Estimated: `12796 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 18_295_000 picoseconds. - Weight::from_parts(18_588_000, 12796) - // Standard Error: 339_226 - .saturating_add(Weight::from_parts(15_418_772, 0).saturating_mul(c.into())) + // Estimated: `5371 + c * (2519 ±0) + r * (2603 ±0)` + // Minimum execution time: 17_992_000 picoseconds. 
+ Weight::from_parts(18_272_000, 5371) + // Standard Error: 332_210 + .saturating_add(Weight::from_parts(14_823_832, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) diff --git a/runtime/common/src/weights/pallet_collective_council.rs b/runtime/common/src/weights/pallet_collective_council.rs index 30d76d5ba2..57c71486e5 100644 --- a/runtime/common/src/weights/pallet_collective_council.rs +++ b/runtime/common/src/weights/pallet_collective_council.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -50,19 +50,19 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (832 ±0) + p * (310 ±0)` - // Estimated: `3379 + m * (769 ±3) + p * (2777 ±1)` - // Minimum execution time: 9_077_000 picoseconds. - Weight::from_parts(9_493_000, 3379) - // Standard Error: 63_289 - .saturating_add(Weight::from_parts(2_988_044, 0).saturating_mul(m.into())) - // Standard Error: 25_718 - .saturating_add(Weight::from_parts(3_413_593, 0).saturating_mul(p.into())) + // Estimated: `5779 + m * (489 ±3) + p * (2615 ±1)` + // Minimum execution time: 9_316_000 picoseconds. + Weight::from_parts(9_646_000, 5779) + // Standard Error: 63_335 + .saturating_add(Weight::from_parts(3_021_599, 0).saturating_mul(m.into())) + // Standard Error: 25_737 + .saturating_add(Weight::from_parts(3_417_842, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 769).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 2777).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 489).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2615).saturating_mul(p.into())) } /// Storage: `Council::Members` (r:1 w:0) /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -73,13 +73,11 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `70 + m * (32 ±0)` - // Estimated: `10464 + m * (32 ±0)` - // Minimum execution time: 13_322_000 picoseconds. 
- Weight::from_parts(13_703_697, 10464) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(b.into())) - // Standard Error: 1_967 - .saturating_add(Weight::from_parts(29_605, 0).saturating_mul(m.into())) + // Estimated: `3039 + m * (32 ±0)` + // Minimum execution time: 13_096_000 picoseconds. + Weight::from_parts(14_363_300, 3039) + // Standard Error: 77 + .saturating_add(Weight::from_parts(1_529, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -94,13 +92,13 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `70 + m * (32 ±0)` - // Estimated: `12444 + m * (32 ±0)` - // Minimum execution time: 16_159_000 picoseconds. - Weight::from_parts(16_446_780, 12444) - // Standard Error: 18 - .saturating_add(Weight::from_parts(1_570, 0).saturating_mul(b.into())) - // Standard Error: 1_977 - .saturating_add(Weight::from_parts(36_868, 0).saturating_mul(m.into())) + // Estimated: `5019 + m * (32 ±0)` + // Minimum execution time: 16_176_000 picoseconds. + Weight::from_parts(16_434_151, 5019) + // Standard Error: 22 + .saturating_add(Weight::from_parts(1_579, 0).saturating_mul(b.into())) + // Standard Error: 2_353 + .saturating_add(Weight::from_parts(38_186, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -123,15 +121,15 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `113 + m * (32 ±0) + p * (51 ±0)` - // Estimated: `12440 + m * (42 ±0) + p * (49 ±0)` - // Minimum execution time: 23_109_000 picoseconds. - Weight::from_parts(21_675_305, 12440) - // Standard Error: 49 - .saturating_add(Weight::from_parts(2_440, 0).saturating_mul(b.into())) - // Standard Error: 5_902 - .saturating_add(Weight::from_parts(167_346, 0).saturating_mul(m.into())) - // Standard Error: 2_059 - .saturating_add(Weight::from_parts(288_842, 0).saturating_mul(p.into())) + // Estimated: `5015 + m * (42 ±0) + p * (49 ±0)` + // Minimum execution time: 22_825_000 picoseconds. + Weight::from_parts(21_731_804, 5015) + // Standard Error: 52 + .saturating_add(Weight::from_parts(2_435, 0).saturating_mul(b.into())) + // Standard Error: 6_224 + .saturating_add(Weight::from_parts(163_434, 0).saturating_mul(m.into())) + // Standard Error: 2_172 + .saturating_add(Weight::from_parts(270_426, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(m.into())) @@ -146,11 +144,11 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `571 + m * (64 ±0)` - // Estimated: `12946 + m * (64 ±0)` - // Minimum execution time: 18_909_000 picoseconds. - Weight::from_parts(20_031_969, 12946) - // Standard Error: 6_118 - .saturating_add(Weight::from_parts(43_154, 0).saturating_mul(m.into())) + // Estimated: `5521 + m * (64 ±0)` + // Minimum execution time: 18_960_000 picoseconds. 
+ Weight::from_parts(19_951_079, 5521) + // Standard Error: 4_868 + .saturating_add(Weight::from_parts(41_781, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -170,13 +168,13 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `41 + m * (64 ±0) + p * (54 ±0)` - // Estimated: `12434 + m * (80 ±0) + p * (50 ±0)` - // Minimum execution time: 23_576_000 picoseconds. - Weight::from_parts(23_544_646, 12434) - // Standard Error: 6_772 - .saturating_add(Weight::from_parts(166_905, 0).saturating_mul(m.into())) - // Standard Error: 1_806 - .saturating_add(Weight::from_parts(254_157, 0).saturating_mul(p.into())) + // Estimated: `5009 + m * (80 ±0) + p * (50 ±0)` + // Minimum execution time: 24_036_000 picoseconds. + Weight::from_parts(23_682_449, 5009) + // Standard Error: 13_553 + .saturating_add(Weight::from_parts(199_916, 0).saturating_mul(m.into())) + // Standard Error: 3_615 + .saturating_add(Weight::from_parts(249_411, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(m.into())) @@ -199,15 +197,15 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `72 + b * (1 ±0) + m * (64 ±0) + p * (72 ±0)` - // Estimated: `12440 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` - // Minimum execution time: 33_773_000 picoseconds. - Weight::from_parts(30_973_138, 12440) - // Standard Error: 112 - .saturating_add(Weight::from_parts(2_961, 0).saturating_mul(b.into())) - // Standard Error: 17_426 - .saturating_add(Weight::from_parts(157_906, 0).saturating_mul(m.into())) - // Standard Error: 4_662 - .saturating_add(Weight::from_parts(508_483, 0).saturating_mul(p.into())) + // Estimated: `5015 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` + // Minimum execution time: 34_767_000 picoseconds. + Weight::from_parts(32_201_177, 5015) + // Standard Error: 104 + .saturating_add(Weight::from_parts(2_977, 0).saturating_mul(b.into())) + // Standard Error: 16_062 + .saturating_add(Weight::from_parts(121_281, 0).saturating_mul(m.into())) + // Standard Error: 4_297 + .saturating_add(Weight::from_parts(474_680, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -231,13 +229,13 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `61 + m * (64 ±0) + p * (54 ±0)` - // Estimated: `12454 + m * (80 ±0) + p * (50 ±0)` - // Minimum execution time: 26_250_000 picoseconds. - Weight::from_parts(25_450_622, 12454) - // Standard Error: 9_050 - .saturating_add(Weight::from_parts(241_874, 0).saturating_mul(m.into())) - // Standard Error: 2_414 - .saturating_add(Weight::from_parts(260_862, 0).saturating_mul(p.into())) + // Estimated: `5029 + m * (80 ±0) + p * (50 ±0)` + // Minimum execution time: 26_465_000 picoseconds. 
+ Weight::from_parts(26_355_757, 5029) + // Standard Error: 6_109 + .saturating_add(Weight::from_parts(197_118, 0).saturating_mul(m.into())) + // Standard Error: 1_629 + .saturating_add(Weight::from_parts(248_457, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(m.into())) @@ -262,15 +260,15 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `92 + b * (1 ±0) + m * (64 ±0) + p * (72 ±0)` - // Estimated: `12460 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` - // Minimum execution time: 36_783_000 picoseconds. - Weight::from_parts(39_249_149, 12460) - // Standard Error: 130 - .saturating_add(Weight::from_parts(1_984, 0).saturating_mul(b.into())) - // Standard Error: 20_187 - .saturating_add(Weight::from_parts(55_559, 0).saturating_mul(m.into())) - // Standard Error: 5_400 - .saturating_add(Weight::from_parts(380_808, 0).saturating_mul(p.into())) + // Estimated: `5035 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` + // Minimum execution time: 37_284_000 picoseconds. + Weight::from_parts(38_468_550, 5035) + // Standard Error: 151 + .saturating_add(Weight::from_parts(1_751, 0).saturating_mul(b.into())) + // Standard Error: 23_369 + .saturating_add(Weight::from_parts(74_279, 0).saturating_mul(m.into())) + // Standard Error: 6_252 + .saturating_add(Weight::from_parts(422_716, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -288,11 +286,11 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `226 + p * (32 ±0)` - // Estimated: `10621 + p * (32 ±0)` - // Minimum execution time: 14_244_000 picoseconds. - Weight::from_parts(15_010_215, 10621) - // Standard Error: 1_227 - .saturating_add(Weight::from_parts(237_257, 0).saturating_mul(p.into())) + // Estimated: `3196 + p * (32 ±0)` + // Minimum execution time: 14_259_000 picoseconds. + Weight::from_parts(14_977_733, 3196) + // Standard Error: 1_364 + .saturating_add(Weight::from_parts(208_700, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) diff --git a/runtime/common/src/weights/pallet_collective_technical_committee.rs b/runtime/common/src/weights/pallet_collective_technical_committee.rs index ae0388dd70..afae41e9e6 100644 --- a/runtime/common/src/weights/pallet_collective_technical_committee.rs +++ b/runtime/common/src/weights/pallet_collective_technical_committee.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! 
EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -50,19 +50,19 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + m * (832 ±0) + p * (310 ±0)` - // Estimated: `3446 + m * (769 ±1) + p * (2777 ±0)` - // Minimum execution time: 9_186_000 picoseconds. - Weight::from_parts(9_521_000, 3446) - // Standard Error: 63_741 - .saturating_add(Weight::from_parts(3_085_515, 0).saturating_mul(m.into())) - // Standard Error: 25_902 - .saturating_add(Weight::from_parts(3_429_012, 0).saturating_mul(p.into())) + // Estimated: `5846 + m * (489 ±3) + p * (2615 ±1)` + // Minimum execution time: 9_275_000 picoseconds. + Weight::from_parts(9_597_000, 5846) + // Standard Error: 63_837 + .saturating_add(Weight::from_parts(3_069_414, 0).saturating_mul(m.into())) + // Standard Error: 25_941 + .saturating_add(Weight::from_parts(3_459_751, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 769).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 2777).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(0, 489).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2615).saturating_mul(p.into())) } /// Storage: `TechnicalCommittee::Members` (r:1 w:0) /// Proof: `TechnicalCommittee::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -73,13 +73,13 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `137 + m * (32 ±0)` - // Estimated: `10531 + m * (32 ±0)` - // Minimum execution time: 13_269_000 picoseconds. - Weight::from_parts(13_556_265, 10531) - // Standard Error: 14 - .saturating_add(Weight::from_parts(1_467, 0).saturating_mul(b.into())) - // Standard Error: 1_540 - .saturating_add(Weight::from_parts(34_869, 0).saturating_mul(m.into())) + // Estimated: `3106 + m * (32 ±0)` + // Minimum execution time: 13_445_000 picoseconds. + Weight::from_parts(13_919_154, 3106) + // Standard Error: 33 + .saturating_add(Weight::from_parts(1_539, 0).saturating_mul(b.into())) + // Standard Error: 3_631 + .saturating_add(Weight::from_parts(735, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -94,13 +94,13 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn propose_execute(b: u32, m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `137 + m * (32 ±0)` - // Estimated: `12511 + m * (32 ±0)` - // Minimum execution time: 16_087_000 picoseconds. - Weight::from_parts(16_123_796, 12511) - // Standard Error: 13 - .saturating_add(Weight::from_parts(1_643, 0).saturating_mul(b.into())) - // Standard Error: 1_403 - .saturating_add(Weight::from_parts(46_341, 0).saturating_mul(m.into())) + // Estimated: `5086 + m * (32 ±0)` + // Minimum execution time: 16_250_000 picoseconds. 
+ Weight::from_parts(16_477_498, 5086) + // Standard Error: 18 + .saturating_add(Weight::from_parts(1_510, 0).saturating_mul(b.into())) + // Standard Error: 1_969 + .saturating_add(Weight::from_parts(34_789, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) } @@ -123,15 +123,15 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `180 + m * (32 ±0) + p * (51 ±0)` - // Estimated: `12507 + m * (42 ±0) + p * (49 ±0)` - // Minimum execution time: 22_699_000 picoseconds. - Weight::from_parts(21_638_042, 12507) + // Estimated: `5082 + m * (42 ±0) + p * (49 ±0)` + // Minimum execution time: 22_760_000 picoseconds. + Weight::from_parts(21_543_385, 5082) // Standard Error: 51 - .saturating_add(Weight::from_parts(2_472, 0).saturating_mul(b.into())) - // Standard Error: 6_144 - .saturating_add(Weight::from_parts(157_053, 0).saturating_mul(m.into())) - // Standard Error: 2_144 - .saturating_add(Weight::from_parts(288_931, 0).saturating_mul(p.into())) + .saturating_add(Weight::from_parts(2_458, 0).saturating_mul(b.into())) + // Standard Error: 6_060 + .saturating_add(Weight::from_parts(163_683, 0).saturating_mul(m.into())) + // Standard Error: 2_115 + .saturating_add(Weight::from_parts(272_134, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(Weight::from_parts(0, 42).saturating_mul(m.into())) @@ -146,11 +146,11 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn vote(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + m * (64 ±0)` - // Estimated: `13013 + m * (64 ±0)` - // Minimum execution time: 19_028_000 picoseconds. - Weight::from_parts(20_042_587, 13013) - // Standard Error: 5_924 - .saturating_add(Weight::from_parts(55_371, 0).saturating_mul(m.into())) + // Estimated: `5588 + m * (64 ±0)` + // Minimum execution time: 19_071_000 picoseconds. + Weight::from_parts(19_920_144, 5588) + // Standard Error: 4_539 + .saturating_add(Weight::from_parts(45_762, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) @@ -170,13 +170,13 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_early_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `108 + m * (64 ±0) + p * (54 ±0)` - // Estimated: `12501 + m * (80 ±0) + p * (50 ±0)` - // Minimum execution time: 23_684_000 picoseconds. - Weight::from_parts(23_574_915, 12501) - // Standard Error: 6_477 - .saturating_add(Weight::from_parts(176_313, 0).saturating_mul(m.into())) - // Standard Error: 1_727 - .saturating_add(Weight::from_parts(242_037, 0).saturating_mul(p.into())) + // Estimated: `5076 + m * (80 ±0) + p * (50 ±0)` + // Minimum execution time: 24_311_000 picoseconds. 
+ Weight::from_parts(23_976_563, 5076) + // Standard Error: 5_765 + .saturating_add(Weight::from_parts(182_588, 0).saturating_mul(m.into())) + // Standard Error: 1_537 + .saturating_add(Weight::from_parts(233_626, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(m.into())) @@ -199,15 +199,15 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `139 + b * (1 ±0) + m * (64 ±0) + p * (72 ±0)` - // Estimated: `12507 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` - // Minimum execution time: 33_571_000 picoseconds. - Weight::from_parts(30_954_291, 12507) - // Standard Error: 111 - .saturating_add(Weight::from_parts(3_041, 0).saturating_mul(b.into())) - // Standard Error: 17_160 - .saturating_add(Weight::from_parts(154_944, 0).saturating_mul(m.into())) - // Standard Error: 4_591 - .saturating_add(Weight::from_parts(495_752, 0).saturating_mul(p.into())) + // Estimated: `5082 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` + // Minimum execution time: 34_612_000 picoseconds. + Weight::from_parts(32_490_124, 5082) + // Standard Error: 104 + .saturating_add(Weight::from_parts(2_742, 0).saturating_mul(b.into())) + // Standard Error: 16_059 + .saturating_add(Weight::from_parts(111_149, 0).saturating_mul(m.into())) + // Standard Error: 4_296 + .saturating_add(Weight::from_parts(467_177, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -231,13 +231,13 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_disapproved(m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `128 + m * (64 ±0) + p * (54 ±0)` - // Estimated: `12521 + m * (80 ±0) + p * (50 ±0)` - // Minimum execution time: 26_336_000 picoseconds. - Weight::from_parts(25_480_461, 12521) - // Standard Error: 7_880 - .saturating_add(Weight::from_parts(258_844, 0).saturating_mul(m.into())) - // Standard Error: 2_102 - .saturating_add(Weight::from_parts(254_430, 0).saturating_mul(p.into())) + // Estimated: `5096 + m * (80 ±0) + p * (50 ±0)` + // Minimum execution time: 26_855_000 picoseconds. + Weight::from_parts(26_331_492, 5096) + // Standard Error: 5_739 + .saturating_add(Weight::from_parts(207_436, 0).saturating_mul(m.into())) + // Standard Error: 1_531 + .saturating_add(Weight::from_parts(238_697, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 80).saturating_mul(m.into())) @@ -262,15 +262,15 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `159 + b * (1 ±0) + m * (64 ±0) + p * (72 ±0)` - // Estimated: `12527 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` - // Minimum execution time: 36_271_000 picoseconds. 
- Weight::from_parts(37_352_472, 12527) - // Standard Error: 86 - .saturating_add(Weight::from_parts(2_515, 0).saturating_mul(b.into())) - // Standard Error: 13_364 - .saturating_add(Weight::from_parts(118_517, 0).saturating_mul(m.into())) - // Standard Error: 3_575 - .saturating_add(Weight::from_parts(410_040, 0).saturating_mul(p.into())) + // Estimated: `5102 + b * (1 ±0) + m * (85 ±0) + p * (65 ±0)` + // Minimum execution time: 37_549_000 picoseconds. + Weight::from_parts(38_540_724, 5102) + // Standard Error: 64 + .saturating_add(Weight::from_parts(1_909, 0).saturating_mul(b.into())) + // Standard Error: 9_910 + .saturating_add(Weight::from_parts(65_105, 0).saturating_mul(m.into())) + // Standard Error: 2_651 + .saturating_add(Weight::from_parts(406_291, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) @@ -288,11 +288,11 @@ impl pallet_collective::WeightInfo for SubstrateWeight< fn disapprove_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `293 + p * (32 ±0)` - // Estimated: `10688 + p * (32 ±0)` - // Minimum execution time: 14_304_000 picoseconds. - Weight::from_parts(15_019_975, 10688) - // Standard Error: 1_223 - .saturating_add(Weight::from_parts(235_471, 0).saturating_mul(p.into())) + // Estimated: `3263 + p * (32 ±0)` + // Minimum execution time: 14_275_000 picoseconds. + Weight::from_parts(14_944_887, 3263) + // Standard Error: 1_012 + .saturating_add(Weight::from_parts(204_715, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) diff --git a/runtime/common/src/weights/pallet_democracy.rs b/runtime/common/src/weights/pallet_democracy.rs index 318879e07f..c8cfe468d1 100644 --- a/runtime/common/src/weights/pallet_democracy.rs +++ b/runtime/common/src/weights/pallet_democracy.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -44,9 +44,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `4772` - // Estimated: `27097` - // Minimum execution time: 45_960_000 picoseconds. - Weight::from_parts(48_009_000, 27097) + // Estimated: `19672` + // Minimum execution time: 45_496_000 picoseconds. 
+ Weight::from_parts(47_337_000, 19672) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -55,9 +55,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `3523` - // Estimated: `15605` - // Minimum execution time: 39_839_000 picoseconds. - Weight::from_parts(41_967_000, 15605) + // Estimated: `8180` + // Minimum execution time: 38_818_000 picoseconds. + Weight::from_parts(40_188_000, 8180) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -72,9 +72,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `3404` - // Estimated: `16170` - // Minimum execution time: 61_334_000 picoseconds. - Weight::from_parts(64_502_000, 16170) + // Estimated: `8745` + // Minimum execution time: 59_067_000 picoseconds. + Weight::from_parts(60_622_000, 8745) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -89,9 +89,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `3426` - // Estimated: `16170` - // Minimum execution time: 63_831_000 picoseconds. - Weight::from_parts(68_608_000, 16170) + // Estimated: `8745` + // Minimum execution time: 63_455_000 picoseconds. + Weight::from_parts(64_915_000, 8745) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -104,9 +104,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `299` - // Estimated: `12576` - // Minimum execution time: 24_973_000 picoseconds. - Weight::from_parts(25_346_000, 12576) + // Estimated: `5151` + // Minimum execution time: 24_561_000 picoseconds. + Weight::from_parts(25_154_000, 5151) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -127,9 +127,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `5942` - // Estimated: `27097` - // Minimum execution time: 97_180_000 picoseconds. - Weight::from_parts(100_478_000, 27097) + // Estimated: `19672` + // Minimum execution time: 96_018_000 picoseconds. + Weight::from_parts(97_882_000, 19672) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -140,9 +140,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `3349` - // Estimated: `15613` - // Minimum execution time: 12_237_000 picoseconds. - Weight::from_parts(13_217_000, 15613) + // Estimated: `8188` + // Minimum execution time: 12_379_000 picoseconds. + Weight::from_parts(13_036_000, 8188) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -152,8 +152,8 @@ impl pallet_democracy::WeightInfo for SubstrateWeight pallet_democracy::WeightInfo for SubstrateWeight pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `219` - // Estimated: `12428` - // Minimum execution time: 25_921_000 picoseconds. - Weight::from_parts(26_398_000, 12428) + // Estimated: `5003` + // Minimum execution time: 25_791_000 picoseconds. 
+ Weight::from_parts(26_288_000, 5003) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -192,9 +192,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `3452` - // Estimated: `15613` - // Minimum execution time: 29_228_000 picoseconds. - Weight::from_parts(30_571_000, 15613) + // Estimated: `8188` + // Minimum execution time: 28_517_000 picoseconds. + Weight::from_parts(29_416_000, 8188) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -209,9 +209,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `5853` - // Estimated: `27097` - // Minimum execution time: 77_209_000 picoseconds. - Weight::from_parts(78_873_000, 27097) + // Estimated: `19672` + // Minimum execution time: 76_933_000 picoseconds. + Weight::from_parts(79_712_000, 19672) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -222,9 +222,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `204` - // Estimated: `12428` - // Minimum execution time: 18_118_000 picoseconds. - Weight::from_parts(18_616_000, 12428) + // Estimated: `5003` + // Minimum execution time: 17_820_000 picoseconds. + Weight::from_parts(18_287_000, 5003) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -238,11 +238,11 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `177 + r * (86 ±0)` - // Estimated: `10399 + r * (2676 ±0)` - // Minimum execution time: 6_709_000 picoseconds. - Weight::from_parts(8_079_694, 10399) - // Standard Error: 7_940 - .saturating_add(Weight::from_parts(3_156_061, 0).saturating_mul(r.into())) + // Estimated: `2974 + r * (2676 ±0)` + // Minimum execution time: 6_621_000 picoseconds. + Weight::from_parts(9_488_189, 2974) + // Standard Error: 20_481 + .saturating_add(Weight::from_parts(3_122_378, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -264,11 +264,11 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `177 + r * (86 ±0)` - // Estimated: `27097 + r * (2676 ±0)` - // Minimum execution time: 10_083_000 picoseconds. - Weight::from_parts(9_476_150, 27097) - // Standard Error: 34_787 - .saturating_add(Weight::from_parts(3_318_352, 0).saturating_mul(r.into())) + // Estimated: `19672 + r * (2676 ±0)` + // Minimum execution time: 9_829_000 picoseconds. + Weight::from_parts(11_695_825, 19672) + // Standard Error: 6_748 + .saturating_add(Weight::from_parts(3_120_087, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) @@ -286,11 +286,11 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `768 + r * (108 ±0)` - // Estimated: `28710 + r * (2676 ±0)` - // Minimum execution time: 39_955_000 picoseconds. 
- Weight::from_parts(45_783_871, 28710) - // Standard Error: 11_473 - .saturating_add(Weight::from_parts(4_185_834, 0).saturating_mul(r.into())) + // Estimated: `21285 + r * (2676 ±0)` + // Minimum execution time: 39_553_000 picoseconds. + Weight::from_parts(45_051_539, 21285) + // Standard Error: 7_356 + .saturating_add(Weight::from_parts(4_093_398, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -305,11 +305,11 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `426 + r * (108 ±0)` - // Estimated: `22440 + r * (2676 ±0)` - // Minimum execution time: 19_228_000 picoseconds. - Weight::from_parts(17_647_159, 22440) - // Standard Error: 11_432 - .saturating_add(Weight::from_parts(4_233_718, 0).saturating_mul(r.into())) + // Estimated: `15015 + r * (2676 ±0)` + // Minimum execution time: 19_358_000 picoseconds. + Weight::from_parts(20_494_338, 15015) + // Standard Error: 8_348 + .saturating_add(Weight::from_parts(4_035_694, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -322,8 +322,8 @@ impl pallet_democracy::WeightInfo for SubstrateWeight pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `496` - // Estimated: `16170` - // Minimum execution time: 24_129_000 picoseconds. - Weight::from_parts(38_793_255, 16170) - // Standard Error: 3_812 - .saturating_add(Weight::from_parts(124_798, 0).saturating_mul(r.into())) + // Estimated: `8745` + // Minimum execution time: 23_409_000 picoseconds. + Weight::from_parts(38_351_730, 8745) + // Standard Error: 3_613 + .saturating_add(Weight::from_parts(94_913, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -358,11 +358,11 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `497 + r * (22 ±0)` - // Estimated: `16170` - // Minimum execution time: 33_881_000 picoseconds. - Weight::from_parts(36_934_309, 16170) - // Standard Error: 1_951 - .saturating_add(Weight::from_parts(155_496, 0).saturating_mul(r.into())) + // Estimated: `8745` + // Minimum execution time: 33_430_000 picoseconds. + Weight::from_parts(37_000_618, 8745) + // Standard Error: 1_725 + .saturating_add(Weight::from_parts(131_050, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -374,11 +374,11 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `661 + r * (26 ±0)` - // Estimated: `16170` - // Minimum execution time: 14_788_000 picoseconds. - Weight::from_parts(18_233_045, 16170) - // Standard Error: 1_869 - .saturating_add(Weight::from_parts(130_651, 0).saturating_mul(r.into())) + // Estimated: `8745` + // Minimum execution time: 14_793_000 picoseconds. 
+ Weight::from_parts(19_170_672, 8745) + // Standard Error: 2_791 + .saturating_add(Weight::from_parts(117_540, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -390,11 +390,11 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `661 + r * (26 ±0)` - // Estimated: `16170` - // Minimum execution time: 14_866_000 picoseconds. - Weight::from_parts(18_810_559, 16170) - // Standard Error: 2_001 - .saturating_add(Weight::from_parts(122_437, 0).saturating_mul(r.into())) + // Estimated: `8745` + // Minimum execution time: 15_014_000 picoseconds. + Weight::from_parts(18_433_481, 8745) + // Standard Error: 1_889 + .saturating_add(Weight::from_parts(130_455, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -407,9 +407,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `289` - // Estimated: `12466` - // Minimum execution time: 17_880_000 picoseconds. - Weight::from_parts(18_392_000, 12466) + // Estimated: `5041` + // Minimum execution time: 17_386_000 picoseconds. + Weight::from_parts(17_878_000, 5041) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -420,9 +420,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `219` - // Estimated: `12428` - // Minimum execution time: 15_898_000 picoseconds. - Weight::from_parts(16_406_000, 12428) + // Estimated: `5003` + // Minimum execution time: 15_715_000 picoseconds. + Weight::from_parts(15_943_000, 5003) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -435,9 +435,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `4821` - // Estimated: `27097` - // Minimum execution time: 42_462_000 picoseconds. - Weight::from_parts(45_072_000, 27097) + // Estimated: `19672` + // Minimum execution time: 42_360_000 picoseconds. + Weight::from_parts(43_382_000, 19672) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -448,9 +448,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `4755` - // Estimated: `27097` - // Minimum execution time: 39_054_000 picoseconds. - Weight::from_parts(40_980_000, 27097) + // Estimated: `19672` + // Minimum execution time: 38_921_000 picoseconds. + Weight::from_parts(39_793_000, 19672) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -461,9 +461,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `12466` - // Minimum execution time: 13_354_000 picoseconds. - Weight::from_parts(14_153_000, 12466) + // Estimated: `5041` + // Minimum execution time: 13_411_000 picoseconds. + Weight::from_parts(13_697_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -474,9 +474,9 @@ impl pallet_democracy::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `235` - // Estimated: `12576` - // Minimum execution time: 17_708_000 picoseconds. 
- Weight::from_parts(18_146_000, 12576) + // Estimated: `5151` + // Minimum execution time: 17_284_000 picoseconds. + Weight::from_parts(17_989_000, 5151) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } diff --git a/runtime/common/src/weights/pallet_multisig.rs b/runtime/common/src/weights/pallet_multisig.rs index 289f8295f6..e46a188b30 100644 --- a/runtime/common/src/weights/pallet_multisig.rs +++ b/runtime/common/src/weights/pallet_multisig.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_multisig //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -38,10 +38,10 @@ impl pallet_multisig::WeightInfo for SubstrateWeight // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_535_000 picoseconds. - Weight::from_parts(14_424_095, 0) - // Standard Error: 7 - .saturating_add(Weight::from_parts(569, 0).saturating_mul(z.into())) + // Minimum execution time: 13_032_000 picoseconds. + Weight::from_parts(14_063_287, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(595, 0).saturating_mul(z.into())) } /// Storage: `Multisig::Multisigs` (r:1 w:1) /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) @@ -50,13 +50,13 @@ impl pallet_multisig::WeightInfo for SubstrateWeight fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `192 + s * (2 ±0)` - // Estimated: `15721` - // Minimum execution time: 43_377_000 picoseconds. - Weight::from_parts(31_926_594, 15721) - // Standard Error: 2_545 - .saturating_add(Weight::from_parts(126_923, 0).saturating_mul(s.into())) - // Standard Error: 24 - .saturating_add(Weight::from_parts(1_508, 0).saturating_mul(z.into())) + // Estimated: `8296` + // Minimum execution time: 42_234_000 picoseconds. + Weight::from_parts(30_773_584, 8296) + // Standard Error: 3_304 + .saturating_add(Weight::from_parts(129_907, 0).saturating_mul(s.into())) + // Standard Error: 32 + .saturating_add(Weight::from_parts(1_622, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -67,13 +67,13 @@ impl pallet_multisig::WeightInfo for SubstrateWeight fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `211` - // Estimated: `15721` - // Minimum execution time: 27_081_000 picoseconds. - Weight::from_parts(17_354_872, 15721) - // Standard Error: 732 - .saturating_add(Weight::from_parts(115_449, 0).saturating_mul(s.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_437, 0).saturating_mul(z.into())) + // Estimated: `8296` + // Minimum execution time: 26_903_000 picoseconds. 
+ Weight::from_parts(16_698_367, 8296) + // Standard Error: 1_772 + .saturating_add(Weight::from_parts(115_347, 0).saturating_mul(s.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -86,13 +86,13 @@ impl pallet_multisig::WeightInfo for SubstrateWeight fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `314 + s * (33 ±0)` - // Estimated: `15721` - // Minimum execution time: 48_126_000 picoseconds. - Weight::from_parts(32_496_418, 15721) - // Standard Error: 1_630 - .saturating_add(Weight::from_parts(175_563, 0).saturating_mul(s.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_722, 0).saturating_mul(z.into())) + // Estimated: `8296` + // Minimum execution time: 48_133_000 picoseconds. + Weight::from_parts(34_672_400, 8296) + // Standard Error: 962 + .saturating_add(Weight::from_parts(152_693, 0).saturating_mul(s.into())) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_566, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -102,11 +102,11 @@ impl pallet_multisig::WeightInfo for SubstrateWeight fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `193 + s * (2 ±0)` - // Estimated: `15721` - // Minimum execution time: 29_631_000 picoseconds. - Weight::from_parts(30_825_024, 15721) - // Standard Error: 976 - .saturating_add(Weight::from_parts(131_945, 0).saturating_mul(s.into())) + // Estimated: `8296` + // Minimum execution time: 29_033_000 picoseconds. + Weight::from_parts(30_328_113, 8296) + // Standard Error: 855 + .saturating_add(Weight::from_parts(130_733, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -116,11 +116,11 @@ impl pallet_multisig::WeightInfo for SubstrateWeight fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `211` - // Estimated: `15721` - // Minimum execution time: 15_801_000 picoseconds. - Weight::from_parts(16_410_458, 15721) - // Standard Error: 709 - .saturating_add(Weight::from_parts(113_039, 0).saturating_mul(s.into())) + // Estimated: `8296` + // Minimum execution time: 15_575_000 picoseconds. + Weight::from_parts(16_091_627, 8296) + // Standard Error: 536 + .saturating_add(Weight::from_parts(112_842, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -130,11 +130,11 @@ impl pallet_multisig::WeightInfo for SubstrateWeight fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `383 + s * (1 ±0)` - // Estimated: `15721` - // Minimum execution time: 30_504_000 picoseconds. - Weight::from_parts(32_341_609, 15721) - // Standard Error: 1_438 - .saturating_add(Weight::from_parts(120_276, 0).saturating_mul(s.into())) + // Estimated: `8296` + // Minimum execution time: 29_584_000 picoseconds. 
+ Weight::from_parts(31_479_882, 8296) + // Standard Error: 850 + .saturating_add(Weight::from_parts(126_162, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } diff --git a/runtime/common/src/weights/pallet_preimage.rs b/runtime/common/src/weights/pallet_preimage.rs index 48b3379022..52519a3903 100644 --- a/runtime/common/src/weights/pallet_preimage.rs +++ b/runtime/common/src/weights/pallet_preimage.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_preimage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -41,11 +41,11 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `182` - // Estimated: `12466` - // Minimum execution time: 28_933_000 picoseconds. - Weight::from_parts(29_089_000, 12466) - // Standard Error: 3 - .saturating_add(Weight::from_parts(2_403, 0).saturating_mul(s.into())) + // Estimated: `5041` + // Minimum execution time: 28_831_000 picoseconds. + Weight::from_parts(29_438_000, 5041) + // Standard Error: 1 + .saturating_add(Weight::from_parts(2_180, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -57,11 +57,11 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `12466` - // Minimum execution time: 14_783_000 picoseconds. - Weight::from_parts(15_014_000, 12466) - // Standard Error: 3 - .saturating_add(Weight::from_parts(2_382, 0).saturating_mul(s.into())) + // Estimated: `5041` + // Minimum execution time: 14_968_000 picoseconds. + Weight::from_parts(15_069_000, 5041) + // Standard Error: 1 + .saturating_add(Weight::from_parts(2_172, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -73,11 +73,11 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `12466` - // Minimum execution time: 14_014_000 picoseconds. - Weight::from_parts(14_222_000, 12466) - // Standard Error: 3 - .saturating_add(Weight::from_parts(2_392, 0).saturating_mul(s.into())) + // Estimated: `5041` + // Minimum execution time: 14_060_000 picoseconds. 
+ Weight::from_parts(14_505_000, 5041) + // Standard Error: 1 + .saturating_add(Weight::from_parts(2_169, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -88,9 +88,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn unnote_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `328` - // Estimated: `12466` - // Minimum execution time: 55_421_000 picoseconds. - Weight::from_parts(59_463_000, 12466) + // Estimated: `5041` + // Minimum execution time: 39_500_000 picoseconds. + Weight::from_parts(40_503_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -101,9 +101,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `12466` - // Minimum execution time: 31_696_000 picoseconds. - Weight::from_parts(35_578_000, 12466) + // Estimated: `5041` + // Minimum execution time: 21_161_000 picoseconds. + Weight::from_parts(22_288_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -112,9 +112,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn request_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `188` - // Estimated: `12466` - // Minimum execution time: 30_409_000 picoseconds. - Weight::from_parts(34_167_000, 12466) + // Estimated: `5041` + // Minimum execution time: 19_529_000 picoseconds. + Weight::from_parts(20_755_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -123,9 +123,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `12466` - // Minimum execution time: 19_725_000 picoseconds. - Weight::from_parts(22_260_000, 12466) + // Estimated: `5041` + // Minimum execution time: 12_429_000 picoseconds. + Weight::from_parts(13_182_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -134,9 +134,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `12466` - // Minimum execution time: 23_970_000 picoseconds. - Weight::from_parts(26_887_000, 12466) + // Estimated: `5041` + // Minimum execution time: 14_152_000 picoseconds. + Weight::from_parts(14_520_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -145,9 +145,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `12466` - // Minimum execution time: 9_562_000 picoseconds. - Weight::from_parts(10_780_000, 12466) + // Estimated: `5041` + // Minimum execution time: 7_963_000 picoseconds. + Weight::from_parts(8_506_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -158,9 +158,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `144` - // Estimated: `12466` - // Minimum execution time: 31_579_000 picoseconds. 
- Weight::from_parts(33_967_000, 12466) + // Estimated: `5041` + // Minimum execution time: 19_859_000 picoseconds. + Weight::from_parts(21_037_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -169,9 +169,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `12466` - // Minimum execution time: 9_016_000 picoseconds. - Weight::from_parts(10_269_000, 12466) + // Estimated: `5041` + // Minimum execution time: 8_127_000 picoseconds. + Weight::from_parts(8_554_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -180,9 +180,9 @@ impl pallet_preimage::WeightInfo for SubstrateWeight fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `106` - // Estimated: `12466` - // Minimum execution time: 9_581_000 picoseconds. - Weight::from_parts(10_391_000, 12466) + // Estimated: `5041` + // Minimum execution time: 7_888_000 picoseconds. + Weight::from_parts(8_181_000, 5041) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } diff --git a/runtime/common/src/weights/pallet_scheduler.rs b/runtime/common/src/weights/pallet_scheduler.rs index 46fd291ae9..3e86a667c5 100644 --- a/runtime/common/src/weights/pallet_scheduler.rs +++ b/runtime/common/src/weights/pallet_scheduler.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_scheduler //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -38,9 +38,9 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `31` - // Estimated: `10399` - // Minimum execution time: 3_144_000 picoseconds. - Weight::from_parts(3_313_000, 10399) + // Estimated: `2974` + // Minimum execution time: 3_197_000 picoseconds. + Weight::from_parts(3_380_000, 2974) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -50,11 +50,11 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `78 + s * (177 ±0)` - // Estimated: `22838` - // Minimum execution time: 3_154_000 picoseconds. - Weight::from_parts(5_999_678, 22838) - // Standard Error: 1_833 - .saturating_add(Weight::from_parts(402_284, 0).saturating_mul(s.into())) + // Estimated: `15413` + // Minimum execution time: 3_080_000 picoseconds. 
+ Weight::from_parts(5_845_277, 15413) + // Standard Error: 1_408 + .saturating_add(Weight::from_parts(389_076, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -62,8 +62,8 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `179 + s * (1 ±0)` - // Estimated: `12554 + s * (1 ±0)` - // Minimum execution time: 18_464_000 picoseconds. - Weight::from_parts(19_074_000, 12554) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_368, 0).saturating_mul(s.into())) + // Estimated: `5129 + s * (1 ±0)` + // Minimum execution time: 18_167_000 picoseconds. + Weight::from_parts(7_053_349, 5129) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_238, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) @@ -88,30 +88,30 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_691_000 picoseconds. - Weight::from_parts(4_895_000, 0) + // Minimum execution time: 4_408_000 picoseconds. + Weight::from_parts(4_659_000, 0) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_086_000 picoseconds. - Weight::from_parts(2_172_000, 0) + // Minimum execution time: 1_936_000 picoseconds. + Weight::from_parts(2_055_000, 0) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_022_000 picoseconds. - Weight::from_parts(2_141_000, 0) + // Minimum execution time: 1_846_000 picoseconds. + Weight::from_parts(1_998_000, 0) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(10463), added: 12938, mode: `MaxEncodedLen`) @@ -119,11 +119,11 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `78 + s * (177 ±0)` - // Estimated: `22838` - // Minimum execution time: 11_125_000 picoseconds. - Weight::from_parts(14_005_030, 22838) - // Standard Error: 1_576 - .saturating_add(Weight::from_parts(444_536, 0).saturating_mul(s.into())) + // Estimated: `15413` + // Minimum execution time: 11_025_000 picoseconds. + Weight::from_parts(13_761_305, 15413) + // Standard Error: 1_529 + .saturating_add(Weight::from_parts(424_769, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -135,11 +135,11 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `78 + s * (177 ±0)` - // Estimated: `22838` - // Minimum execution time: 14_770_000 picoseconds. - Weight::from_parts(14_969_947, 22838) - // Standard Error: 2_034 - .saturating_add(Weight::from_parts(668_444, 0).saturating_mul(s.into())) + // Estimated: `15413` + // Minimum execution time: 14_684_000 picoseconds. 
+ Weight::from_parts(14_433_734, 15413) + // Standard Error: 1_345 + .saturating_add(Weight::from_parts(645_630, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -151,11 +151,11 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `255 + s * (185 ±0)` - // Estimated: `22838` - // Minimum execution time: 14_410_000 picoseconds. - Weight::from_parts(18_435_960, 22838) - // Standard Error: 2_420 - .saturating_add(Weight::from_parts(478_247, 0).saturating_mul(s.into())) + // Estimated: `15413` + // Minimum execution time: 14_521_000 picoseconds. + Weight::from_parts(18_257_288, 15413) + // Standard Error: 2_232 + .saturating_add(Weight::from_parts(453_220, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -167,11 +167,11 @@ impl pallet_scheduler::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `281 + s * (185 ±0)` - // Estimated: `22838` - // Minimum execution time: 17_434_000 picoseconds. - Weight::from_parts(17_397_786, 22838) - // Standard Error: 1_613 - .saturating_add(Weight::from_parts(693_648, 0).saturating_mul(s.into())) + // Estimated: `15413` + // Minimum execution time: 16_881_000 picoseconds. + Weight::from_parts(17_169_542, 15413) + // Standard Error: 1_174 + .saturating_add(Weight::from_parts(671_078, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } diff --git a/runtime/common/src/weights/pallet_session.rs b/runtime/common/src/weights/pallet_session.rs index e1ce9baffb..29e2a5f874 100644 --- a/runtime/common/src/weights/pallet_session.rs +++ b/runtime/common/src/weights/pallet_session.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_session //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -40,9 +40,9 @@ impl pallet_session::WeightInfo for SubstrateWeight fn set_keys() -> Weight { // Proof Size summary in bytes: // Measured: `518` - // Estimated: `12893` - // Minimum execution time: 15_213_000 picoseconds. - Weight::from_parts(15_515_000, 12893) + // Estimated: `5468` + // Minimum execution time: 15_414_000 picoseconds. + Weight::from_parts(15_929_000, 5468) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -53,9 +53,9 @@ impl pallet_session::WeightInfo for SubstrateWeight fn purge_keys() -> Weight { // Proof Size summary in bytes: // Measured: `409` - // Estimated: `12784` - // Minimum execution time: 10_926_000 picoseconds. 
- Weight::from_parts(11_490_000, 12784) + // Estimated: `5359` + // Minimum execution time: 11_047_000 picoseconds. + Weight::from_parts(11_510_000, 5359) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } diff --git a/runtime/common/src/weights/pallet_timestamp.rs b/runtime/common/src/weights/pallet_timestamp.rs index fd8a960943..e196dcd4c0 100644 --- a/runtime/common/src/weights/pallet_timestamp.rs +++ b/runtime/common/src/weights/pallet_timestamp.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_timestamp //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -38,9 +38,9 @@ impl pallet_timestamp::WeightInfo for SubstrateWeight Weight { // Proof Size summary in bytes: // Measured: `42` - // Estimated: `10403` - // Minimum execution time: 6_247_000 picoseconds. - Weight::from_parts(6_653_000, 10403) + // Estimated: `2978` + // Minimum execution time: 6_355_000 picoseconds. + Weight::from_parts(6_713_000, 2978) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -48,7 +48,7 @@ impl pallet_timestamp::WeightInfo for SubstrateWeight pallet_treasury::WeightInfo for SubstrateWeight // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 233_000 picoseconds. - Weight::from_parts(272_000, 0) + // Minimum execution time: 249_000 picoseconds. + Weight::from_parts(298_000, 0) } /// Storage: `Treasury::ProposalCount` (r:1 w:1) /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -47,9 +47,9 @@ impl pallet_treasury::WeightInfo for SubstrateWeight fn propose_spend() -> Weight { // Proof Size summary in bytes: // Measured: `146` - // Estimated: `10399` - // Minimum execution time: 25_834_000 picoseconds. - Weight::from_parts(26_548_000, 10399) + // Estimated: `2974` + // Minimum execution time: 25_570_000 picoseconds. + Weight::from_parts(26_207_000, 2974) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -60,9 +60,9 @@ impl pallet_treasury::WeightInfo for SubstrateWeight fn reject_proposal() -> Weight { // Proof Size summary in bytes: // Measured: `304` - // Estimated: `12503` - // Minimum execution time: 27_264_000 picoseconds. - Weight::from_parts(27_836_000, 12503) + // Estimated: `5078` + // Minimum execution time: 27_831_000 picoseconds. 
+ Weight::from_parts(28_883_000, 5078) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -74,11 +74,11 @@ impl pallet_treasury::WeightInfo for SubstrateWeight fn approve_proposal(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `317 + p * (12 ±0)` - // Estimated: `12483` - // Minimum execution time: 7_974_000 picoseconds. - Weight::from_parts(9_910_403, 12483) - // Standard Error: 2_302 - .saturating_add(Weight::from_parts(114_596, 0).saturating_mul(p.into())) + // Estimated: `5058` + // Minimum execution time: 8_007_000 picoseconds. + Weight::from_parts(10_000_767, 5058) + // Standard Error: 1_236 + .saturating_add(Weight::from_parts(104_264, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -87,9 +87,9 @@ impl pallet_treasury::WeightInfo for SubstrateWeight fn remove_approval() -> Weight { // Proof Size summary in bytes: // Measured: `90` - // Estimated: `10653` - // Minimum execution time: 6_139_000 picoseconds. - Weight::from_parts(6_314_000, 10653) + // Estimated: `3228` + // Minimum execution time: 6_224_000 picoseconds. + Weight::from_parts(6_434_000, 3228) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -105,11 +105,11 @@ impl pallet_treasury::WeightInfo for SubstrateWeight fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `526 + p * (256 ±0)` - // Estimated: `12503 + p * (5206 ±0)` - // Minimum execution time: 27_214_000 picoseconds. - Weight::from_parts(31_210_384, 12503) - // Standard Error: 32_915 - .saturating_add(Weight::from_parts(38_997_019, 0).saturating_mul(p.into())) + // Estimated: `5078 + p * (5206 ±0)` + // Minimum execution time: 26_669_000 picoseconds. + Weight::from_parts(36_288_214, 5078) + // Standard Error: 25_936 + .saturating_add(Weight::from_parts(38_150_353, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) diff --git a/runtime/common/src/weights/pallet_utility.rs b/runtime/common/src/weights/pallet_utility.rs index 5e7b13d734..c2a8df258d 100644 --- a/runtime/common/src/weights/pallet_utility.rs +++ b/runtime/common/src/weights/pallet_utility.rs @@ -1,9 +1,9 @@ //! Autogenerated weights for pallet_utility //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-4nbbq`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -16,7 +16,7 @@ // --chain=frequency-bench // --heap-pages=4096 // --wasm-execution=compiled -// --additional-trie-layers=20 +// --additional-trie-layers=5 // --steps=50 // --repeat=20 // --output=./scripts/../runtime/common/src/weights @@ -38,43 +38,43 @@ impl pallet_utility::WeightInfo for SubstrateWeight // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_847_000 picoseconds. 
- Weight::from_parts(10_001_832, 0) - // Standard Error: 3_022 - .saturating_add(Weight::from_parts(4_377_100, 0).saturating_mul(c.into())) + // Minimum execution time: 6_045_000 picoseconds. + Weight::from_parts(7_163_412, 0) + // Standard Error: 1_537 + .saturating_add(Weight::from_parts(4_425_775, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_946_000 picoseconds. - Weight::from_parts(4_160_000, 0) + // Minimum execution time: 4_276_000 picoseconds. + Weight::from_parts(4_488_000, 0) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_791_000 picoseconds. - Weight::from_parts(7_022_751, 0) - // Standard Error: 2_565 - .saturating_add(Weight::from_parts(4_582_273, 0).saturating_mul(c.into())) + // Minimum execution time: 6_022_000 picoseconds. + Weight::from_parts(6_657_684, 0) + // Standard Error: 2_552 + .saturating_add(Weight::from_parts(4_643_390, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_776_000 picoseconds. - Weight::from_parts(8_192_000, 0) + // Minimum execution time: 7_806_000 picoseconds. + Weight::from_parts(8_256_000, 0) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_810_000 picoseconds. - Weight::from_parts(15_630_031, 0) - // Standard Error: 2_727 - .saturating_add(Weight::from_parts(4_336_080, 0).saturating_mul(c.into())) + // Minimum execution time: 6_014_000 picoseconds. + Weight::from_parts(1_911_834, 0) + // Standard Error: 1_881 + .saturating_add(Weight::from_parts(4_438_235, 0).saturating_mul(c.into())) } } diff --git a/runtime/frequency/src/lib.rs b/runtime/frequency/src/lib.rs index 07a70720b9..1af8603e70 100644 --- a/runtime/frequency/src/lib.rs +++ b/runtime/frequency/src/lib.rs @@ -261,7 +261,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 63, + spec_version: 64, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -275,7 +275,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency-rococo"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 63, + spec_version: 64, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/scripts/run_benchmarks.sh b/scripts/run_benchmarks.sh index c51d36f6d1..fbb1173758 100755 --- a/scripts/run_benchmarks.sh +++ b/scripts/run_benchmarks.sh @@ -183,7 +183,7 @@ function run_benchmark() { --chain="frequency-bench" \ --heap-pages=4096 \ --wasm-execution=compiled \ - --additional-trie-layers=20 \ + --additional-trie-layers=5 \ --steps=${2} \ --repeat=${3} \ --output=${4} \ From afa4490c714e899370987f1302f99c6442e4333b Mon Sep 17 00:00:00 2001 From: Shannon Wells Date: Fri, 1 Dec 2023 01:28:43 +0000 Subject: [PATCH 9/9] Feat/separate unlock chunks #1753 (#1763) separate unstaking unlock chunks into their own storage and do a storage migration which includes staking type, for convenience of the planned Provider Boost feature so that another storage migration is not needed for that feature. 
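In storage terms, the per-account record is split: the ledger keeps only the active stake (plus the new staking type), and thawing unlock chunks move into their own map. The block below is a simplified, self-contained sketch of that split and of the one-time translation the migration performs; concrete types (`u64`, `Vec`, `BTreeMap`) stand in for the pallet's generics, `BoundedVec`, and FRAME storage maps, so names like `StakingAccountDetailsV1` and `StakingDetailsV2` are illustrative only, not the pallet code itself.

```rust
// Sketch only: models the storage split and migration described above,
// with concrete types standing in for the pallet's generics.
use std::collections::BTreeMap;

type AccountId = u64;
type Balance = u64;
type EpochNumber = u32;

#[derive(Clone, Debug)]
struct UnlockChunk { value: Balance, thaw_at: EpochNumber }

// v1: everything lived in one record per staker.
#[derive(Clone, Debug)]
struct StakingAccountDetailsV1 {
    active: Balance,
    total: Balance,
    unlocking: Vec<UnlockChunk>,
}

// v2: the ledger keeps only the active stake plus the staking type;
// thawing chunks move to their own per-account map (UnstakeUnlocks).
#[derive(Clone, Copy, Debug)]
enum StakingType { MaximumCapacity, ProviderBoost }

#[derive(Clone, Debug)]
struct StakingDetailsV2 { active: Balance, staking_type: StakingType }

fn migrate(
    v1: &BTreeMap<AccountId, StakingAccountDetailsV1>,
) -> (BTreeMap<AccountId, StakingDetailsV2>, BTreeMap<AccountId, Vec<UnlockChunk>>) {
    let mut ledger = BTreeMap::new();
    let mut unlocks = BTreeMap::new();
    for (who, old) in v1 {
        // Pre-existing stakers can only have been MaximumCapacity stakers.
        ledger.insert(*who, StakingDetailsV2 {
            active: old.active,
            staking_type: StakingType::MaximumCapacity,
        });
        unlocks.insert(*who, old.unlocking.clone());
    }
    (ledger, unlocks)
}

fn main() {
    let mut v1 = BTreeMap::new();
    v1.insert(1u64, StakingAccountDetailsV1 {
        active: 3,
        total: 5,
        unlocking: vec![UnlockChunk { value: 2, thaw_at: 10 }],
    });
    let (ledger, unlocks) = migrate(&v1);
    assert_eq!(ledger[&1].active, 3);
    assert_eq!(unlocks[&1].len(), 1);
}
```

Splitting the chunks out keeps the frequently-read ledger entry small, while the pallet can still compute the required balance lock as the active stake plus the not-yet-thawed chunks.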
--- designdocs/capacity.md | 4 +- ...capacity_staking_rewards_implementation.md | 35 ++-- pallets/capacity/src/benchmarking.rs | 34 ++-- pallets/capacity/src/lib.rs | 151 +++++++++++++----- pallets/capacity/src/migration/mod.rs | 2 + pallets/capacity/src/migration/v2.rs | 115 +++++++++++++ .../capacity/src/tests/migrate_v2_tests.rs | 56 +++++++ pallets/capacity/src/tests/mod.rs | 2 + pallets/capacity/src/tests/other_tests.rs | 15 +- .../src/tests/stake_and_deposit_tests.rs | 52 +++--- .../tests/staking_account_details_tests.rs | 112 ++----------- .../capacity/src/tests/unlock_chunks_tests.rs | 38 +++++ pallets/capacity/src/tests/unstaking_tests.rs | 51 +++++- .../src/tests/withdraw_unstaked_tests.rs | 121 ++++++-------- pallets/capacity/src/types.rs | 146 ++++++++--------- pallets/capacity/src/weights.rs | 104 ++++++------ runtime/frequency/src/lib.rs | 5 +- 17 files changed, 638 insertions(+), 405 deletions(-) create mode 100644 pallets/capacity/src/migration/mod.rs create mode 100644 pallets/capacity/src/migration/v2.rs create mode 100644 pallets/capacity/src/tests/migrate_v2_tests.rs create mode 100644 pallets/capacity/src/tests/unlock_chunks_tests.rs diff --git a/designdocs/capacity.md b/designdocs/capacity.md index 160fab6a86..8f0a911ce8 100644 --- a/designdocs/capacity.md +++ b/designdocs/capacity.md @@ -302,7 +302,7 @@ Storage for keeping records of staking accounting. /// Storage for keeping a ledger of staked token amounts for accounts. #[pallet::storage] pub type StakingAccountLedger = - StorageMap<_, Twox64Concat, T::AccountId, StakingAccountDetails>; + StorageMap<_, Twox64Concat, T::AccountId, StakingDetails>; ``` @@ -355,7 +355,7 @@ The type used for storing information about staking details. ```rust -pub struct StakingAccountDetails { +pub struct StakingDetails { /// The amount a Staker has staked, minus the sum of all tokens in `unlocking`. pub active: Balance, /// The total amount of tokens in `active` and `unlocking` diff --git a/designdocs/capacity_staking_rewards_implementation.md b/designdocs/capacity_staking_rewards_implementation.md index 24f8b02af5..0a7f0ac99d 100644 --- a/designdocs/capacity_staking_rewards_implementation.md +++ b/designdocs/capacity_staking_rewards_implementation.md @@ -1,11 +1,13 @@ # Capacity Staking Rewards Implementation ## Overview -Staking Capacity for rewards is a new feature which allows token holders to stake FRQCY and split the staking -rewards with a Provider they choose. The Provider receives a small reward in Capacity -(which is periodically replenished), and the staker receives a periodic return in FRQCY token. -The amount of Capacity that the Provider would receive in such case is a fraction of what they would get from a -`MaximumCapacity` stake. +This document describes a new type of staking which allows token holders to stake FRQCY and split staking rewards with a Provider the staker chooses. + +Currently, when staking token for Capacity, the only choice is to assign all the generated Capacity to the designated target. +The target, who must be a Provider, may then spend this Capacity to pay for specific transactions. This is called **Maximized Capacity** staking. + +In this new type of staking, called **Provider Boosting**, the Provider receives a reward in Capacity and the staker receives a periodic return in FRQCY token. +The amount of Capacity that the Provider would receive in such case is a less than what they would get from a `MaximumCapacity` stake. 
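For reference, the pallet distinguishes these two modes with a `StakingType` enum; the sketch below mirrors the variants added to `pallets/capacity/src/types.rs` in this change (derives and exact doc comments abridged).

```rust
/// Sketch of the two staking modes described above.
pub enum StakingType {
    /// All generated Capacity goes to the targeted Provider; no token reward.
    MaximumCapacity,
    /// The Provider receives less Capacity, and the staker earns a periodic
    /// FRQCY token reward.
    ProviderBoost,
}
```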
The period of Capacity replenishment - the `Epoch` - and the period of token reward - the `RewardEra` - are different. Epochs must necessarily be much shorter than reward eras because Capacity replenishment needs to be multiple times a day to meet the needs of a high traffic network, and to allow Providers the ability to delay transactions to a time of day with lower network activity if necessary. @@ -13,12 +15,13 @@ Reward eras need to be on a much longer scale, such as every two weeks, because In addition, this lets the chain store Reward history for much longer rather than forcing people to have to take steps to claim rewards. ### Diagram -This illustrates roughly (and not to scale) how Provider Boost staking works. Just like the current staking behavior, now called Maximized staking, The Capacity generated by staking is added to the Provider's Capacity ledger immediately so it can be used right away. The amount staked is locked in Alice's account, preventing transfer. +This illustrates roughly -- not to scale and **NOT reflecting actual reward amounts** -- how Provider Boost staking is expected to work. Just like the current staking behavior, now called Maximized staking, the Capacity generated by staking is added to the Provider's Capacity ledger immediately so it can be used right away. The amount staked is locked in Alice's account, preventing transfer. Provider Boost token rewards are earned only for token staked for a complete Reward Era. So Alice does not begin earning rewards until Reward Era 5 in the diagram, and this means Alice must wait until Reward Era 6 to claim rewards for Reward Era 5. Unclaimed reward amounts are actually not minted or transferred until they are claimed, and may also not be calculated until then, depending on the economic model. This process will be described in more detail in the Economic Model Design Document. +### NOTE: Actual reward amounts are TBD; amounts are for illustration purposes only ![Provider boosted staking](https://github.com/LibertyDSNP/frequency/assets/502640/ffb632f2-79c2-4a09-a906-e4de02e4f348) The proposed feature is a design for staking FRQCY token in exchange for Capacity and/or FRQCY. @@ -50,23 +53,15 @@ It does not give regard to what the economic model actually is, since that is ye ## Staking Token Rewards -### StakingAccountDetails updates +### StakingAccountDetails --> StakingDetails New fields are added. The field **`last_rewarded_at`** is to keep track of the last time rewards were claimed for this Staking Account. MaximumCapacity staking accounts MUST always have the value `None` for `last_rewarded_at`. -Finally, `stake_change_unlocking`, is added, which stores an `UnlockChunk` when a staking account has changed. -targets for some amount of funds. This is to prevent retarget spamming. -This will be a V2 of this storage and original StakingAccountDetails will need to be migrated. +This is a second version of this storage, to replace StakingAccountDetails, and StakingAccountDetails data will need to be migrated. ```rust -pub struct StakingAccountDetailsV2 { +pub struct StakingDetails { pub active: BalanceOf, - pub total: BalanceOf, - pub unlocking: BoundedVec, T::EpochNumber>, T::MaxUnlockingChunks>, - /// The number of the last StakingEra that this account's rewards were claimed. pub last_rewards_claimed_at: Option, // NEW None means never rewarded, Some(RewardEra) means last rewarded RewardEra.
- /// staking amounts that have been retargeted are prevented from being retargeted again for the - /// configured Thawing Period number of blocks. - pub stake_change_unlocking: BoundedVec, T::RewardEra>, T::MaxUnlockingChunks>, // NEW } ``` @@ -150,7 +145,7 @@ pub struct StakingRewardClaim { /// How much is claimed, in token pub claimed_reward: Balance, /// The end state of the staking account if the operations are valid - pub staking_account_end_state: StakingAccountDetails, + pub staking_account_end_state: StakingDetails, /// The starting era for the claimed reward period, inclusive pub from_era: T::RewardEra, /// The ending era for the claimed reward period, inclusive @@ -264,7 +259,7 @@ calculate rewards on chain at all. Regardless, on success, the claimed rewards are minted and transferred as locked token to the origin, with the existing unstaking thaw period for withdrawal (which simply unlocks thawed token amounts as before). -There is no chunk added; instead the existing unstaking thaw period is applied to last_rewards_claimed_at in StakingAccountDetails. +There is no chunk added; instead the existing unstaking thaw period is applied to last_rewards_claimed_at in StakingDetails. Forcing stakers to wait a thaw period for every claim is an incentive to claim rewards sooner than later, leveling out possible inflationary effects and helping prevent unclaimed rewards from expiring. @@ -336,7 +331,7 @@ No more than `T::MaxUnlockingChunks` staking amounts may be retargeted within th Each call creates one chunk. Emits a `StakingTargetChanged` event with the parameters of the extrinsic. ```rust /// Sets the target of the staking capacity to a new target. -/// This adds a chunk to `StakingAccountDetails.stake_change_unlocking chunks`, up to `T::MaxUnlockingChunks`. +/// This adds a chunk to `StakingDetails.stake_change_unlocking chunks`, up to `T::MaxUnlockingChunks`. /// The staked amount and Capacity generated by `amount` originally targeted to the `from` MSA Id is reassigned to the `to` MSA Id. /// Does not affect unstaking process or additional stake amounts. /// Changing a staking target to a Provider when Origin has nothing staked them will retain the staking type. diff --git a/pallets/capacity/src/benchmarking.rs b/pallets/capacity/src/benchmarking.rs index 3f6fe24099..4ca8fc99cb 100644 --- a/pallets/capacity/src/benchmarking.rs +++ b/pallets/capacity/src/benchmarking.rs @@ -2,7 +2,7 @@ use super::*; use crate::Pallet as Capacity; use frame_benchmarking::{account, benchmarks, whitelist_account}; -use frame_support::{assert_ok, traits::Currency}; +use frame_support::{assert_ok, traits::Currency, BoundedVec}; use frame_system::RawOrigin; use parity_scale_codec::alloc::vec::Vec; @@ -56,21 +56,19 @@ benchmarks! 
{ withdraw_unstaked { let caller: T::AccountId = create_funded_account::("account", SEED, 5u32); - let amount: BalanceOf = T::MinimumStakingAmount::get(); - - let mut staking_account = StakingAccountDetails::::default(); - staking_account.deposit(500u32.into()); - - // set new unlock chunks using tuples of (value, thaw_at) - let new_unlocks: Vec<(u32, u32)> = Vec::from([(50u32, 3u32), (50u32, 5u32)]); - assert_eq!(true, staking_account.set_unlock_chunks(&new_unlocks)); + let mut unlocking: UnlockChunkList = BoundedVec::default(); + for _i in 0..T::MaxUnlockingChunks::get() { + let unlock_chunk: UnlockChunk, T::EpochNumber> = UnlockChunk { value: 1u32.into(), thaw_at: 3u32.into() }; + assert_ok!(unlocking.try_push(unlock_chunk)); + } + UnstakeUnlocks::::set(&caller, Some(unlocking)); - Capacity::::set_staking_account(&caller.clone(), &staking_account); CurrentEpoch::::set(T::EpochNumber::from(5u32)); }: _ (RawOrigin::Signed(caller.clone())) verify { - assert_last_event::(Event::::StakeWithdrawn {account: caller, amount: 100u32.into() }.into()); + let total = T::MaxUnlockingChunks::get(); + assert_last_event::(Event::::StakeWithdrawn {account: caller, amount: total.into() }.into()); } on_initialize { @@ -91,7 +89,7 @@ benchmarks! { let target = 1; let block_number = 4u32; - let mut staking_account = StakingAccountDetails::::default(); + let mut staking_account = StakingDetails::::default(); let mut target_details = StakingTargetDetails::>::default(); let mut capacity_details = CapacityDetails::, ::EpochNumber>::default(); @@ -99,10 +97,20 @@ benchmarks! { target_details.deposit(staking_amount, capacity_amount); capacity_details.deposit(&staking_amount, &capacity_amount); - Capacity::::set_staking_account(&caller.clone(), &staking_account); + let _ = Capacity::::set_staking_account_and_lock(&caller.clone(), &staking_account); Capacity::::set_target_details_for(&caller.clone(), target, target_details); Capacity::::set_capacity_for(target, capacity_details); + // fill up unlock chunks to max bound - 1 + let count = T::MaxUnlockingChunks::get()-1; + let mut unlocking: UnlockChunkList = BoundedVec::default(); + for _i in 0..count { + let unlock_chunk: UnlockChunk, T::EpochNumber> = UnlockChunk { value: 1u32.into(), thaw_at: 3u32.into() }; + assert_ok!(unlocking.try_push(unlock_chunk)); + } + UnstakeUnlocks::::set(&caller, Some(unlocking)); + + }: _ (RawOrigin::Signed(caller.clone()), target, unstaking_amount.into()) verify { assert_last_event::(Event::::UnStaked {account: caller, target: target, amount: unstaking_amount.into(), capacity: Capacity::::calculate_capacity_reduction(unstaking_amount.into(), staking_amount, capacity_amount) }.into()); diff --git a/pallets/capacity/src/lib.rs b/pallets/capacity/src/lib.rs index 61640341a5..8444e6da46 100644 --- a/pallets/capacity/src/lib.rs +++ b/pallets/capacity/src/lib.rs @@ -49,7 +49,6 @@ )] use frame_support::{ - dispatch::DispatchResult, ensure, traits::{Currency, Get, Hooks, LockIdentifier, LockableCurrency, WithdrawReasons}, weights::{constants::RocksDbWeight, Weight}, @@ -81,6 +80,8 @@ mod benchmarking; #[cfg(test)] mod tests; +/// storage migrations +pub mod migration; pub mod weights; type BalanceOf = @@ -88,13 +89,20 @@ type BalanceOf = const STAKING_ID: LockIdentifier = *b"netstkng"; use frame_system::pallet_prelude::*; + #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{pallet_prelude::*, Twox64Concat}; + use frame_support::{ + pallet_prelude::{StorageVersion, *}, + Twox64Concat, + }; use 
sp_runtime::traits::{AtLeast32BitUnsigned, MaybeDisplay}; + /// the storage version for this pallet + pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. @@ -154,11 +162,11 @@ pub mod pallet { /// Storage for keeping a ledger of staked token amounts for accounts. /// - Keys: AccountId - /// - Value: [`StakingAccountDetails`](types::StakingAccountDetails) + /// - Value: [`StakingDetails`](types::StakingDetails) #[pallet::storage] #[pallet::getter(fn get_staking_account_for)] pub type StakingAccountLedger = - StorageMap<_, Twox64Concat, T::AccountId, StakingAccountDetails>; + StorageMap<_, Twox64Concat, T::AccountId, StakingDetails>; /// Storage to record how many tokens were targeted to an MSA. /// - Keys: AccountId, MSA Id @@ -206,9 +214,15 @@ pub mod pallet { pub type EpochLength = StorageValue<_, BlockNumberFor, ValueQuery, EpochLengthDefault>; + #[pallet::storage] + #[pallet::getter(fn get_unstake_unlocking_for)] + pub type UnstakeUnlocks = + StorageMap<_, Twox64Concat, T::AccountId, UnlockChunkList>; + // Simple declaration of the `Pallet` type. It is placeholder we use to implement traits and // method. #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::event] @@ -288,6 +302,8 @@ pub mod pallet { MaxEpochLengthExceeded, /// Staker is attempting to stake an amount that leaves a token balance below the minimum amount. BalanceTooLowtoStake, + /// None of the token amounts in UnlockChunks has thawed yet. + NoThawedTokenAvailable, } #[pallet::hooks] @@ -335,25 +351,17 @@ pub mod pallet { Ok(()) } - /// removes all thawed UnlockChunks from caller's StakingAccount and unlocks the sum of the thawed values + /// Removes all thawed UnlockChunks from caller's UnstakeUnlocks and unlocks the sum of the thawed values /// in the caller's token account. /// /// ### Errors - /// - Returns `Error::NotAStakingAccount` if no StakingAccountDetails are found for `origin`. - /// - Returns `Error::NoUnstakedTokensAvailable` if the account has no unstaking chunks or none are thawed. + /// - Returns `Error::NoUnstakedTokensAvailable` if the account has no unstaking chunks. + /// - Returns `Error::NoThawedTokenAvailable` if there are unstaking chunks, but none are thawed. #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::withdraw_unstaked())] pub fn withdraw_unstaked(origin: OriginFor) -> DispatchResult { let staker = ensure_signed(origin)?; - - let mut staking_account = - Self::get_staking_account_for(&staker).ok_or(Error::::NotAStakingAccount)?; - - let current_epoch = Self::get_current_epoch(); - let amount_withdrawn = staking_account.reap_thawed(current_epoch); - ensure!(!amount_withdrawn.is_zero(), Error::::NoUnstakedTokensAvailable); - - Self::update_or_delete_staking_account(&staker, &mut staking_account); + let amount_withdrawn = Self::do_withdraw_unstaked(&staker)?; Self::deposit_event(Event::::StakeWithdrawn { account: staker, amount: amount_withdrawn, @@ -367,8 +375,9 @@ pub mod pallet { /// - Returns `Error::UnstakedAmountIsZero` if `amount` is not greater than zero. /// - Returns `Error::MaxUnlockingChunksExceeded` if attempting to unlock more times than config::MaxUnlockingChunks. /// - Returns `Error::AmountToUnstakeExceedsAmountStaked` if `amount` exceeds the amount currently staked. 
- /// - Returns `Error::InvalidTarget` if `target` is not a valid staking target - /// - Returns `Error:: NotAStakingAccount` if `origin` has nothing staked + /// - Returns `Error::InvalidTarget` if `target` is not a valid staking target (not a Provider) + /// - Returns `Error:: NotAStakingAccount` if `origin` has nothing staked at all + /// - Returns `Error::StakerTargetRelationshipNotFound` if `origin` has nothing staked to `target` #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::unstake())] pub fn unstake( @@ -381,6 +390,8 @@ pub mod pallet { ensure!(requested_amount > Zero::zero(), Error::::UnstakedAmountIsZero); let actual_amount = Self::decrease_active_staking_balance(&unstaker, requested_amount)?; + Self::add_unlock_chunk(&unstaker, actual_amount)?; + let capacity_reduction = Self::reduce_capacity(&unstaker, target, actual_amount)?; Self::deposit_event(Event::UnStaked { @@ -426,12 +437,12 @@ impl Pallet { staker: &T::AccountId, target: MessageSourceId, amount: BalanceOf, - ) -> Result<(StakingAccountDetails, BalanceOf), DispatchError> { + ) -> Result<(StakingDetails, BalanceOf), DispatchError> { ensure!(amount > Zero::zero(), Error::::ZeroAmountNotAllowed); ensure!(T::TargetValidator::validate(target), Error::::InvalidTarget); let staking_account = Self::get_staking_account_for(&staker).unwrap_or_default(); - let stakable_amount = staking_account.get_stakable_amount_for(&staker, amount); + let stakable_amount = Self::get_stakable_amount_for(&staker, amount); ensure!(stakable_amount > Zero::zero(), Error::::BalanceTooLowtoStake); @@ -452,7 +463,7 @@ impl Pallet { /// Additionally, it issues Capacity to the MSA target. fn increase_stake_and_issue_capacity( staker: &T::AccountId, - staking_account: &mut StakingAccountDetails, + staking_account: &mut StakingDetails, target: MessageSourceId, amount: BalanceOf, ) -> Result, DispatchError> { @@ -465,34 +476,34 @@ impl Pallet { let mut capacity_details = Self::get_capacity_for(target).unwrap_or_default(); capacity_details.deposit(&amount, &capacity).ok_or(ArithmeticError::Overflow)?; - Self::set_staking_account(&staker, staking_account); + Self::set_staking_account_and_lock(&staker, staking_account)?; + Self::set_target_details_for(&staker, target, target_details); Self::set_capacity_for(target, capacity_details); Ok(capacity) } - /// Sets staking account details. - fn set_staking_account(staker: &T::AccountId, staking_account: &StakingAccountDetails) { - T::Currency::set_lock(STAKING_ID, &staker, staking_account.total, WithdrawReasons::all()); - StakingAccountLedger::::insert(staker, staking_account); - } - - /// Deletes staking account details - fn delete_staking_account(staker: &T::AccountId) { - T::Currency::remove_lock(STAKING_ID, &staker); - StakingAccountLedger::::remove(&staker); + /// Sets staking account details after a deposit + fn set_staking_account_and_lock( + staker: &T::AccountId, + staking_account: &StakingDetails, + ) -> Result<(), DispatchError> { + let unlocks = Self::get_unstake_unlocking_for(staker).unwrap_or_default(); + let total_to_lock: BalanceOf = staking_account + .active + .checked_add(&unlock_chunks_total::(&unlocks)) + .ok_or(ArithmeticError::Overflow)?; + T::Currency::set_lock(STAKING_ID, &staker, total_to_lock, WithdrawReasons::all()); + Self::set_staking_account(staker, staking_account); + Ok(()) } - /// If the staking account total is zero we reap storage, otherwise set the account to the new details. 
- fn update_or_delete_staking_account( - staker: &T::AccountId, - staking_account: &StakingAccountDetails, - ) { - if staking_account.total.is_zero() { - Self::delete_staking_account(&staker); + fn set_staking_account(staker: &T::AccountId, staking_account: &StakingDetails) { + if staking_account.active.is_zero() { + StakingAccountLedger::::set(staker, None); } else { - Self::set_staking_account(&staker, &staking_account) + StakingAccountLedger::::insert(staker, staking_account); } } @@ -513,7 +524,7 @@ impl Pallet { CapacityLedger::::insert(target, capacity_details); } - /// Decrease a staking account's active token and create an unlocking chunk to be thawed at some future block. + /// Decrease a staking account's active token. fn decrease_active_staking_balance( unstaker: &T::AccountId, amount: BalanceOf, @@ -522,15 +533,67 @@ impl Pallet { Self::get_staking_account_for(unstaker).ok_or(Error::::NotAStakingAccount)?; ensure!(amount <= staking_account.active, Error::::AmountToUnstakeExceedsAmountStaked); + let actual_unstaked_amount = staking_account.withdraw(amount)?; + Self::set_staking_account(unstaker, &staking_account); + Ok(actual_unstaked_amount) + } + + fn add_unlock_chunk( + unstaker: &T::AccountId, + actual_unstaked_amount: BalanceOf, + ) -> Result<(), DispatchError> { let current_epoch: T::EpochNumber = Self::get_current_epoch(); let thaw_at = current_epoch.saturating_add(T::EpochNumber::from(T::UnstakingThawPeriod::get())); + let mut unlocks = Self::get_unstake_unlocking_for(unstaker).unwrap_or_default(); + + let unlock_chunk: UnlockChunk, T::EpochNumber> = + UnlockChunk { value: actual_unstaked_amount, thaw_at }; + unlocks + .try_push(unlock_chunk) + .map_err(|_| Error::::MaxUnlockingChunksExceeded)?; - let unstake_result = staking_account.withdraw(amount, thaw_at)?; + UnstakeUnlocks::::set(unstaker, Some(unlocks)); + Ok(()) + } - Self::set_staking_account(&unstaker, &staking_account); + // Calculates a stakable amount from a proposed amount. + pub(crate) fn get_stakable_amount_for( + staker: &T::AccountId, + proposed_amount: BalanceOf, + ) -> BalanceOf { + let account_balance = T::Currency::free_balance(&staker); + account_balance + .saturating_sub(T::MinimumTokenBalance::get()) + .min(proposed_amount) + } - Ok(unstake_result) + pub(crate) fn do_withdraw_unstaked( + staker: &T::AccountId, + ) -> Result, DispatchError> { + let current_epoch = Self::get_current_epoch(); + let mut total_unlocking: BalanceOf = Zero::zero(); + + let mut unlocks = + Self::get_unstake_unlocking_for(staker).ok_or(Error::::NoUnstakedTokensAvailable)?; + let amount_withdrawn = unlock_chunks_reap_thawed::(&mut unlocks, current_epoch); + ensure!(!amount_withdrawn.is_zero(), Error::::NoThawedTokenAvailable); + + if unlocks.is_empty() { + UnstakeUnlocks::::set(staker, None); + } else { + total_unlocking = unlock_chunks_total::(&unlocks); + UnstakeUnlocks::::set(staker, Some(unlocks)); + } + + let staking_account = Self::get_staking_account_for(staker).unwrap_or_default(); + let total_locked = staking_account.active.saturating_add(total_unlocking); + if total_locked.is_zero() { + T::Currency::remove_lock(STAKING_ID, &staker); + } else { + T::Currency::set_lock(STAKING_ID, &staker, total_locked, WithdrawReasons::all()); + } + Ok(amount_withdrawn) } /// Reduce available capacity of target and return the amount of capacity reduction. 
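Before moving on to the migration, the lock bookkeeping above can be summarized as: the balance lock must always cover the active stake plus every unlock chunk that has not yet thawed, and `withdraw_unstaked` only shrinks or removes the lock after thawed chunks are reaped. The block below is a minimal standalone model of that rule, using plain `Vec` and integer types instead of `BoundedVec`, `BalanceOf<T>`, and `Currency` locks; it is an illustration, not the pallet code.

```rust
// Standalone model of the lock accounting used by set_staking_account_and_lock
// and do_withdraw_unstaked: the lock covers active stake plus unthawed chunks,
// and reaping thawed chunks shrinks the lock.
type Balance = u64;
type EpochNumber = u32;

#[derive(Clone)]
struct UnlockChunk { value: Balance, thaw_at: EpochNumber }

// Remove all chunks that have thawed by `current_epoch` and return their total.
fn reap_thawed(chunks: &mut Vec<UnlockChunk>, current_epoch: EpochNumber) -> Balance {
    let thawed: Balance = chunks
        .iter()
        .filter(|c| c.thaw_at <= current_epoch)
        .map(|c| c.value)
        .sum();
    chunks.retain(|c| c.thaw_at > current_epoch);
    thawed
}

// The lock must cover the active stake plus everything still unlocking.
fn required_lock(active: Balance, chunks: &[UnlockChunk]) -> Balance {
    active + chunks.iter().map(|c| c.value).sum::<Balance>()
}

fn main() {
    let mut chunks = vec![
        UnlockChunk { value: 1, thaw_at: 2 },
        UnlockChunk { value: 2, thaw_at: 3 },
        UnlockChunk { value: 3, thaw_at: 4 },
    ];
    let active = 4;
    assert_eq!(required_lock(active, &chunks), 10);

    // At epoch 3 the first two chunks are withdrawable; the lock shrinks accordingly.
    assert_eq!(reap_thawed(&mut chunks, 3), 3);
    assert_eq!(required_lock(active, &chunks), 7);
}
```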
diff --git a/pallets/capacity/src/migration/mod.rs b/pallets/capacity/src/migration/mod.rs new file mode 100644 index 0000000000..c34354a101 --- /dev/null +++ b/pallets/capacity/src/migration/mod.rs @@ -0,0 +1,2 @@ +/// migrations to v2 +pub mod v2; diff --git a/pallets/capacity/src/migration/v2.rs b/pallets/capacity/src/migration/v2.rs new file mode 100644 index 0000000000..943bd6fa5b --- /dev/null +++ b/pallets/capacity/src/migration/v2.rs @@ -0,0 +1,115 @@ +use crate::{ + types::{StakingDetails, UnlockChunk}, + BalanceOf, Config, Pallet, StakingAccountLedger, StakingType, UnlockChunkList, UnstakeUnlocks, +}; +use frame_support::{ + pallet_prelude::{GetStorageVersion, Weight}, + traits::{Get, OnRuntimeUpgrade, StorageVersion}, +}; + +const LOG_TARGET: &str = "runtime::capacity"; + +#[cfg(feature = "try-runtime")] +use sp_std::{fmt::Debug, vec::Vec}; + +/// Only contains V1 storage format +pub mod v1 { + use super::*; + use frame_support::{storage_alias, BoundedVec, Twox64Concat}; + use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; + use scale_info::TypeInfo; + + #[derive(Default, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)] + /// Old StakingAccountDetails struct + pub struct StakingAccountDetails { + /// The amount a Staker has staked, minus the sum of all tokens in `unlocking`. + pub active: BalanceOf, + /// The total amount of tokens in `active` and `unlocking` + pub total: BalanceOf, + /// Unstaked balances that are thawing or awaiting withdrawal. + pub unlocking: BoundedVec< + UnlockChunk, T::EpochNumber>, + ::MaxUnlockingChunks, + >, + } + + #[storage_alias] + /// alias to StakingAccountLedger storage + pub(crate) type StakingAccountLedger = StorageMap< + Pallet, + Twox64Concat, + ::AccountId, + StakingAccountDetails, + >; +} + +/// migrate StakingAccountLedger to use new StakingDetails +pub fn migrate_to_v2() -> Weight { + let on_chain_version = Pallet::::on_chain_storage_version(); // 1r + + if on_chain_version.lt(&2) { + log::info!(target: LOG_TARGET, "🔄 StakingAccountLedger migration started"); + let mut maybe_count = 0u32; + StakingAccountLedger::::translate( + |key: ::AccountId, + old_details: v1::StakingAccountDetails| { + let new_account: StakingDetails = StakingDetails { + active: old_details.active, + staking_type: StakingType::MaximumCapacity, + }; + let new_unlocks: UnlockChunkList = old_details.unlocking; + UnstakeUnlocks::::insert(key, new_unlocks); + maybe_count += 1; + log::info!(target: LOG_TARGET,"migrated {:?}", maybe_count); + Some(new_account) + }, + ); + StorageVersion::new(2).put::>(); // 1 w + let reads = (maybe_count + 1) as u64; + let writes = (maybe_count * 2 + 1) as u64; + log::info!(target: LOG_TARGET, "🔄 migration finished"); + let weight = T::DbWeight::get().reads_writes(reads, writes); + log::info!(target: LOG_TARGET, "Migration calculated weight = {:?}", weight); + weight + } else { + // storage was already migrated. + log::info!(target: LOG_TARGET, "Old StorageAccountLedger migration attempted to run. 
Please remove"); + T::DbWeight::get().reads(1) + } +} +/// The OnRuntimeUpgrade implementation for this storage migration +pub struct MigrateToV2(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for MigrateToV2 { + fn on_runtime_upgrade() -> Weight { + migrate_to_v2::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use frame_support::storage::generator::StorageMap; + use parity_scale_codec::Encode; + let pallet_prefix = v1::StakingAccountLedger::::module_prefix(); + let storage_prefix = v1::StakingAccountLedger::::storage_prefix(); + assert_eq!(&b"Capacity"[..], pallet_prefix); + assert_eq!(&b"StakingAccountLedger"[..], storage_prefix); + log::info!(target: LOG_TARGET, "Running pre_upgrade..."); + + let count = v1::StakingAccountLedger::::iter().count() as u32; + log::info!(target: LOG_TARGET, "Finish pre_upgrade for {:?} records", count); + Ok(count.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use parity_scale_codec::Decode; + let pre_upgrade_count: u32 = Decode::decode(&mut state.as_slice()).unwrap_or_default(); + let on_chain_version = Pallet::::on_chain_storage_version(); + + assert_eq!(on_chain_version, crate::pallet::STORAGE_VERSION); + assert_eq!(pre_upgrade_count as usize, StakingAccountLedger::::iter().count()); + assert_eq!(pre_upgrade_count as usize, UnstakeUnlocks::::iter().count()); + + log::info!(target: LOG_TARGET, "✅ migration post_upgrade checks passed"); + Ok(()) + } +} diff --git a/pallets/capacity/src/tests/migrate_v2_tests.rs b/pallets/capacity/src/tests/migrate_v2_tests.rs new file mode 100644 index 0000000000..ce90dc83c3 --- /dev/null +++ b/pallets/capacity/src/tests/migrate_v2_tests.rs @@ -0,0 +1,56 @@ +#[cfg(test)] +mod test { + use crate::{ + migration::{ + v2::v1::{ + StakingAccountDetails as OldStakingAccountDetails, + StakingAccountLedger as OldStakingAccountLedger, + }, + *, + }, + tests::mock::*, + types::*, + BalanceOf, Config, StakingAccountLedger, + StakingType::MaximumCapacity, + UnstakeUnlocks, + }; + use frame_support::{traits::StorageVersion, BoundedVec}; + + #[test] + #[allow(deprecated)] + fn test_v1_to_v2_works() { + new_test_ext().execute_with(|| { + StorageVersion::new(1).put::(); + for i in 0..3u32 { + let storage_key: ::AccountId = i.into(); + let unlocks: BoundedVec< + UnlockChunk, ::EpochNumber>, + ::MaxUnlockingChunks, + > = BoundedVec::try_from(vec![ + UnlockChunk { value: i as u64, thaw_at: i + 10 }, + UnlockChunk { value: (i + 1) as u64, thaw_at: i + 20 }, + UnlockChunk { value: (i + 2) as u64, thaw_at: i + 30 }, + ]) + .unwrap_or_default(); + + let old_record = + OldStakingAccountDetails:: { active: 3, total: 5, unlocking: unlocks }; + OldStakingAccountLedger::::insert(storage_key, old_record); + } + + assert_eq!(OldStakingAccountLedger::::iter().count(), 3); + + let _w = v2::migrate_to_v2::(); + + assert_eq!(StakingAccountLedger::::iter().count(), 3); + assert_eq!(UnstakeUnlocks::::iter().count(), 3); + + // check that this is really the new type + let last_account: StakingDetails = Capacity::get_staking_account_for(2).unwrap(); + assert_eq!(last_account.staking_type, MaximumCapacity); + + let last_unlocks = Capacity::get_unstake_unlocking_for(2).unwrap(); + assert_eq!(9u64, unlock_chunks_total::(&last_unlocks)); + }) + } +} diff --git a/pallets/capacity/src/tests/mod.rs b/pallets/capacity/src/tests/mod.rs index afef352dc8..871e227ae0 100644 --- a/pallets/capacity/src/tests/mod.rs +++ 
b/pallets/capacity/src/tests/mod.rs @@ -1,5 +1,6 @@ pub mod capacity_details_tests; pub mod epochs_tests; +mod migrate_v2_tests; pub mod mock; pub mod other_tests; pub mod replenishment_tests; @@ -7,6 +8,7 @@ pub mod stake_and_deposit_tests; pub mod staking_account_details_tests; pub mod staking_target_details_tests; pub mod testing_utils; +mod unlock_chunks_tests; pub mod unstaking_tests; pub mod withdraw_unstaked_tests; pub mod withdrawal_tests; diff --git a/pallets/capacity/src/tests/other_tests.rs b/pallets/capacity/src/tests/other_tests.rs index 7a32c1a87c..9c559f8aef 100644 --- a/pallets/capacity/src/tests/other_tests.rs +++ b/pallets/capacity/src/tests/other_tests.rs @@ -1,12 +1,15 @@ -use frame_support::traits::{Currency, Get}; +use frame_support::{ + assert_ok, + traits::{Currency, Get}, +}; use frame_system::pallet_prelude::BlockNumberFor; use sp_runtime::traits::Zero; use common_primitives::{capacity::Nontransferable, msa::MessageSourceId}; use crate::{ - BalanceOf, CapacityDetails, Config, CurrentEpoch, CurrentEpochInfo, EpochInfo, - StakingAccountDetails, StakingTargetDetails, + BalanceOf, CapacityDetails, Config, CurrentEpoch, CurrentEpochInfo, EpochInfo, StakingDetails, + StakingTargetDetails, }; use super::{mock::*, testing_utils::*}; @@ -64,13 +67,13 @@ fn start_new_epoch_works() { } #[test] -fn set_staking_account_is_succesful() { +fn set_staking_account_is_successful() { new_test_ext().execute_with(|| { let staker = 100; - let mut staking_account = StakingAccountDetails::::default(); + let mut staking_account = StakingDetails::::default(); staking_account.deposit(55); - Capacity::set_staking_account(&staker, &staking_account); + assert_ok!(Capacity::set_staking_account_and_lock(&staker, &staking_account)); assert_eq!(Balances::locks(&staker)[0].amount, 55); }); diff --git a/pallets/capacity/src/tests/stake_and_deposit_tests.rs b/pallets/capacity/src/tests/stake_and_deposit_tests.rs index 95e5b94c8d..b91039730e 100644 --- a/pallets/capacity/src/tests/stake_and_deposit_tests.rs +++ b/pallets/capacity/src/tests/stake_and_deposit_tests.rs @@ -1,5 +1,5 @@ use super::{mock::*, testing_utils::*}; -use crate::{BalanceOf, CapacityDetails, Error, Event, StakingAccountDetails}; +use crate::{BalanceOf, CapacityDetails, Error, Event, StakingDetails}; use common_primitives::{capacity::Nontransferable, msa::MessageSourceId}; use frame_support::{assert_noop, assert_ok, traits::WithdrawReasons}; use sp_runtime::ArithmeticError; @@ -15,9 +15,7 @@ fn stake_works() { assert_ok!(Capacity::stake(RuntimeOrigin::signed(account), target, amount)); // Check that StakingAccountLedger is updated. - assert_eq!(Capacity::get_staking_account_for(account).unwrap().total, 50); assert_eq!(Capacity::get_staking_account_for(account).unwrap().active, 50); - assert_eq!(Capacity::get_staking_account_for(account).unwrap().unlocking.len(), 0); // Check that StakingTargetLedger is updated. assert_eq!(Capacity::get_target_for(account, target).unwrap().amount, 50); @@ -115,9 +113,7 @@ fn stake_increase_stake_amount_works() { assert_ok!(Capacity::stake(RuntimeOrigin::signed(account), target, additional_amount)); // Check that StakingAccountLedger is updated. - assert_eq!(Capacity::get_staking_account_for(account).unwrap().total, 150); assert_eq!(Capacity::get_staking_account_for(account).unwrap().active, 150); - assert_eq!(Capacity::get_staking_account_for(account).unwrap().unlocking.len(), 0); // Check that StakingTargetLedger is updated. 
assert_eq!(Capacity::get_target_for(account, target).unwrap().amount, 150); @@ -151,9 +147,7 @@ fn stake_multiple_accounts_can_stake_to_the_same_target() { assert_ok!(Capacity::stake(RuntimeOrigin::signed(account_1), target, stake_amount_1)); // Check that StakingAccountLedger is updated. - assert_eq!(Capacity::get_staking_account_for(account_1).unwrap().total, 50); assert_eq!(Capacity::get_staking_account_for(account_1).unwrap().active, 50); - assert_eq!(Capacity::get_staking_account_for(account_1).unwrap().unlocking.len(), 0); // Check that StakingTargetLedger is updated. assert_eq!(Capacity::get_target_for(account_1, target).unwrap().amount, 50); @@ -175,9 +169,7 @@ fn stake_multiple_accounts_can_stake_to_the_same_target() { assert_ok!(Capacity::stake(RuntimeOrigin::signed(account_2), target, stake_amount_2)); // Check that StakingAccountLedger is updated. - assert_eq!(Capacity::get_staking_account_for(account_2).unwrap().total, 100); assert_eq!(Capacity::get_staking_account_for(account_2).unwrap().active, 100); - assert_eq!(Capacity::get_staking_account_for(account_2).unwrap().unlocking.len(), 0); // Check that StakingTargetLedger is updated. assert_eq!(Capacity::get_target_for(account_2, target).unwrap().amount, 100); @@ -204,7 +196,6 @@ fn stake_an_account_can_stake_to_multiple_targets() { let amount_2 = 200; assert_ok!(Capacity::stake(RuntimeOrigin::signed(account), target_1, amount_1)); - assert_eq!(Capacity::get_staking_account_for(account).unwrap().total, amount_1); assert_ok!(Capacity::set_epoch_length(RuntimeOrigin::root(), 10)); @@ -213,9 +204,7 @@ fn stake_an_account_can_stake_to_multiple_targets() { assert_ok!(Capacity::stake(RuntimeOrigin::signed(account), target_2, amount_2)); // Check that StakingAccountLedger is updated. - assert_eq!(Capacity::get_staking_account_for(account).unwrap().total, 300); assert_eq!(Capacity::get_staking_account_for(account).unwrap().active, 300); - assert_eq!(Capacity::get_staking_account_for(account).unwrap().unlocking.len(), 0); // Check that StakingTargetLedger is updated for target 1. assert_eq!(Capacity::get_target_for(account, target_1).unwrap().amount, 100); @@ -249,9 +238,7 @@ fn stake_when_staking_amount_is_greater_than_free_balance_it_stakes_maximum() { assert_ok!(Capacity::stake(RuntimeOrigin::signed(account), target, amount)); // Check that StakingAccountLedger is updated. - assert_eq!(Capacity::get_staking_account_for(account).unwrap().total, 190); assert_eq!(Capacity::get_staking_account_for(account).unwrap().active, 190); - assert_eq!(Capacity::get_staking_account_for(account).unwrap().unlocking.len(), 0); // Check that StakingTargetLedger is updated. 
assert_eq!(Capacity::get_target_for(account, target).unwrap().amount, 190); @@ -264,6 +251,17 @@ fn stake_when_staking_amount_is_greater_than_free_balance_it_stakes_maximum() { }); } +#[test] +fn get_stakable_amount_for_works() { + new_test_ext().execute_with(|| { + let account = 200; + // An amount greater than the free balance + let amount = 230; + let res: u64 = Capacity::get_stakable_amount_for(&account, amount); + assert_eq!(res, 190); + }) +} + #[test] fn stake_when_staking_amount_is_less_than_min_token_balance_it_errors() { new_test_ext().execute_with(|| { @@ -354,7 +352,7 @@ fn ensure_can_stake_is_successful() { let amount = 10; register_provider(target, String::from("Foo")); - let staking_details = StakingAccountDetails::::default(); + let staking_details = StakingDetails::::default(); assert_ok!( Capacity::ensure_can_stake(&account, target, amount), (staking_details, BalanceOf::::from(10u64)) @@ -368,7 +366,7 @@ fn increase_stake_and_issue_capacity_is_successful() { let staker = 10_000; // has 10_000 token let target: MessageSourceId = 1; let amount = 550; - let mut staking_account = StakingAccountDetails::::default(); + let mut staking_account = StakingDetails::::default(); assert_ok!(Capacity::increase_stake_and_issue_capacity( &staker, @@ -377,9 +375,7 @@ fn increase_stake_and_issue_capacity_is_successful() { amount )); - assert_eq!(staking_account.total, amount); assert_eq!(staking_account.active, amount); - assert_eq!(staking_account.unlocking.len(), 0); let capacity_details = Capacity::get_capacity_for(&target).unwrap(); @@ -394,6 +390,26 @@ fn increase_stake_and_issue_capacity_is_successful() { }); } +#[test] +fn stake_when_there_are_unlocks_sets_lock_correctly() { + new_test_ext().execute_with(|| { + let staker = 600; + let target1 = 2; + let target2 = 3; + register_provider(target1, String::from("target1")); + register_provider(target2, String::from("target2")); + assert_ok!(Capacity::stake(RuntimeOrigin::signed(staker), target1, 20)); + + assert_ok!(Capacity::unstake(RuntimeOrigin::signed(staker), target1, 5)); + + assert_ok!(Capacity::stake(RuntimeOrigin::signed(staker), target2, 20)); + + // should all still be locked. 
+ assert_eq!(Balances::locks(&staker)[0].amount, 40); + assert_eq!(Balances::locks(&staker)[0].reasons, WithdrawReasons::all().into()); + }) +} + #[test] fn impl_deposit_is_successful() { new_test_ext().execute_with(|| { diff --git a/pallets/capacity/src/tests/staking_account_details_tests.rs b/pallets/capacity/src/tests/staking_account_details_tests.rs index e49ed9082b..eed5e1f158 100644 --- a/pallets/capacity/src/tests/staking_account_details_tests.rs +++ b/pallets/capacity/src/tests/staking_account_details_tests.rs @@ -1,110 +1,50 @@ use super::mock::*; use crate::*; -use frame_support::assert_err; -use sp_core::bounded::BoundedVec; - -type UnlockBVec = BoundedVec< - UnlockChunk, ::EpochNumber>, - ::MaxUnlockingChunks, ->; - #[test] -fn staking_account_details_withdraw_reduces_active_staking_balance_and_creates_unlock_chunk() { - let mut staking_account_details = StakingAccountDetails:: { +fn staking_account_details_withdraw_reduces_active_staking_balance() { + let mut staking_account_details = StakingDetails:: { active: BalanceOf::::from(15u64), - total: BalanceOf::::from(15u64), - unlocking: BoundedVec::default(), + staking_type: StakingType::MaximumCapacity, }; - assert_eq!(Ok(3u64), staking_account_details.withdraw(3, 3)); - let expected_chunks: UnlockBVec = - BoundedVec::try_from(vec![UnlockChunk { value: 3u64, thaw_at: 3u32 }]).unwrap(); + assert_eq!(Ok(3u64), staking_account_details.withdraw(3)); assert_eq!( staking_account_details, - StakingAccountDetails:: { + StakingDetails:: { active: BalanceOf::::from(12u64), - total: BalanceOf::::from(15u64), - unlocking: expected_chunks, + staking_type: StakingType::MaximumCapacity, } ) } #[test] fn staking_account_details_withdraw_goes_to_zero_when_result_below_minimum() { - let mut staking_account_details = StakingAccountDetails:: { + let mut staking_account_details = StakingDetails:: { active: BalanceOf::::from(10u64), - total: BalanceOf::::from(10u64), - unlocking: BoundedVec::default(), + staking_type: StakingType::MaximumCapacity, }; - assert_eq!(Ok(10u64), staking_account_details.withdraw(6, 3)); + assert_eq!(Ok(10u64), staking_account_details.withdraw(6)); assert_eq!(0u64, staking_account_details.active); - assert_eq!(10u64, staking_account_details.total); staking_account_details.deposit(10); - assert_eq!(Ok(10u64), staking_account_details.withdraw(9, 3)); + assert_eq!(Ok(10u64), staking_account_details.withdraw(9)); assert_eq!(0u64, staking_account_details.active); staking_account_details.deposit(10); - assert_eq!(Ok(10u64), staking_account_details.withdraw(11, 3)); + assert_eq!(Ok(10u64), staking_account_details.withdraw(11)); assert_eq!(0u64, staking_account_details.active); } -#[test] -fn staking_account_details_withdraw_returns_err_when_too_many_chunks() { - let maximum_chunks: UnlockBVec = BoundedVec::try_from(vec![ - UnlockChunk { value: 1u64, thaw_at: 3u32 }, - UnlockChunk { value: 1u64, thaw_at: 3u32 }, - UnlockChunk { value: 1u64, thaw_at: 3u32 }, - UnlockChunk { value: 1u64, thaw_at: 3u32 }, - ]) - .unwrap(); - - let mut staking_account_details = StakingAccountDetails:: { - active: BalanceOf::::from(10u64), - total: BalanceOf::::from(10u64), - unlocking: maximum_chunks, - }; - - assert_err!(staking_account_details.withdraw(6, 3), Error::::MaxUnlockingChunksExceeded); - assert_eq!(10u64, staking_account_details.active); - assert_eq!(10u64, staking_account_details.total); -} - -#[test] -fn staking_account_details_reap_thawed_happy_path() { - let mut staking_account = StakingAccountDetails::::default(); - 
staking_account.deposit(10); - - // 10 token total, 6 token unstaked - let new_unlocks: Vec<(u32, u32)> = vec![(1u32, 2u32), (2u32, 3u32), (3u32, 4u32)]; - assert_eq!(true, staking_account.set_unlock_chunks(&new_unlocks)); - assert_eq!(10, staking_account.total); - assert_eq!(3, staking_account.unlocking.len()); - - // At epoch 3, the first two chunks should be thawed. - assert_eq!(3u64, staking_account.reap_thawed(3u32)); - assert_eq!(1, staking_account.unlocking.len()); - // ...leaving 10-3 = 7 total in staking - assert_eq!(7, staking_account.total); - - // At epoch 5, all unstaking is done. - assert_eq!(3u64, staking_account.reap_thawed(5u32)); - assert_eq!(0, staking_account.unlocking.len()); - // ...leaving 7-3 = 4 total - assert_eq!(4, staking_account.total); -} - #[test] fn impl_staking_account_details_increase_by() { - let mut staking_account = StakingAccountDetails::::default(); + let mut staking_account = StakingDetails::::default(); assert_eq!(staking_account.deposit(10), Some(())); assert_eq!( staking_account, - StakingAccountDetails:: { + StakingDetails:: { active: BalanceOf::::from(10u64), - total: BalanceOf::::from(10u64), - unlocking: BoundedVec::default(), + staking_type: StakingType::MaximumCapacity, } ) } @@ -112,28 +52,10 @@ fn impl_staking_account_details_increase_by() { #[test] fn impl_staking_account_details_default() { assert_eq!( - StakingAccountDetails::::default(), - StakingAccountDetails:: { + StakingDetails::::default(), + StakingDetails:: { active: BalanceOf::::zero(), - total: BalanceOf::::zero(), - unlocking: BoundedVec::default(), + staking_type: StakingType::MaximumCapacity, }, ); } - -#[test] -fn impl_staking_account_details_get_stakable_amount_for() { - new_test_ext().execute_with(|| { - let account = 200; - let staking_account = StakingAccountDetails::::default(); - - // When staking all of free balance. - assert_eq!(staking_account.get_stakable_amount_for(&account, 10), 10); - - // When staking an amount below free balance. - assert_eq!(staking_account.get_stakable_amount_for(&account, 5), 5); - - // When staking an amount above account free balance. It stakes all of the free balance. - assert_eq!(staking_account.get_stakable_amount_for(&account, 200), 190); - }); -} diff --git a/pallets/capacity/src/tests/unlock_chunks_tests.rs b/pallets/capacity/src/tests/unlock_chunks_tests.rs new file mode 100644 index 0000000000..423e0db3de --- /dev/null +++ b/pallets/capacity/src/tests/unlock_chunks_tests.rs @@ -0,0 +1,38 @@ +use crate::{ + tests::mock::{new_test_ext, Test}, + unlock_chunks_from_vec, unlock_chunks_reap_thawed, unlock_chunks_total, UnlockChunkList, +}; +use sp_runtime::BoundedVec; + +#[test] +fn unlock_chunks_reap_thawed_happy_path() { + new_test_ext().execute_with(|| { + // 10 token total, 6 token unstaked + let new_unlocks: Vec<(u32, u32)> = vec![(1u32, 2u32), (2u32, 3u32), (3u32, 4u32)]; + let mut chunks = unlock_chunks_from_vec::(&new_unlocks); + assert_eq!(3, chunks.len()); + + // At epoch 3, the first two chunks should be thawed. + let reaped = unlock_chunks_reap_thawed::(&mut chunks, 3); + assert_eq!(3, reaped); + assert_eq!(1, chunks.len()); + assert_eq!(3, unlock_chunks_total::(&chunks)); + + // At epoch 5, all unstaking is done. 
+ assert_eq!(3u64, unlock_chunks_reap_thawed::(&mut chunks, 5u32)); + assert_eq!(0, chunks.len()); + + assert_eq!(0u64, unlock_chunks_reap_thawed::(&mut chunks, 5u32)); + }) +} + +#[test] +fn unlock_chunks_total_works() { + new_test_ext().execute_with(|| { + let mut chunks: UnlockChunkList = BoundedVec::default(); + assert_eq!(0u64, unlock_chunks_total::(&chunks)); + let new_unlocks: Vec<(u32, u32)> = vec![(1u32, 2u32), (2u32, 3u32), (3u32, 4u32)]; + chunks = unlock_chunks_from_vec::(&new_unlocks); + assert_eq!(6u64, unlock_chunks_total::(&chunks)); + }) +} diff --git a/pallets/capacity/src/tests/unstaking_tests.rs b/pallets/capacity/src/tests/unstaking_tests.rs index 916e354040..8e930cc141 100644 --- a/pallets/capacity/src/tests/unstaking_tests.rs +++ b/pallets/capacity/src/tests/unstaking_tests.rs @@ -1,6 +1,6 @@ use super::{mock::*, testing_utils::*}; use crate as pallet_capacity; -use crate::{CapacityDetails, StakingAccountDetails, StakingTargetDetails, UnlockChunk}; +use crate::{CapacityDetails, StakingDetails, StakingTargetDetails, StakingType, UnlockChunk}; use common_primitives::msa::MessageSourceId; use frame_support::{assert_noop, assert_ok, traits::Get}; use pallet_capacity::{BalanceOf, Config, Error, Event}; @@ -26,18 +26,19 @@ fn unstake_happy_path() { // Assert that staking account detail values are decremented correctly after unstaking let staking_account_details = Capacity::get_staking_account_for(token_account).unwrap(); - assert_eq!(staking_account_details.unlocking.len(), 1); let expected_unlocking_chunks: BoundedVec< UnlockChunk, ::EpochNumber>, ::MaxUnlockingChunks, > = BoundedVec::try_from(vec![UnlockChunk { value: unstaking_amount, thaw_at: 2u32 }]) .unwrap(); + let unlocking = Capacity::get_unstake_unlocking_for(token_account).unwrap(); + assert_eq!(unlocking, expected_unlocking_chunks); + assert_eq!( - StakingAccountDetails:: { + StakingDetails:: { active: BalanceOf::::from(60u64), - total: BalanceOf::::from(staking_amount), - unlocking: expected_unlocking_chunks, + staking_type: StakingType::MaximumCapacity, }, staking_account_details, ); @@ -109,7 +110,7 @@ fn unstake_errors_max_unlocking_chunks_exceeded() { assert_ok!(Capacity::stake(RuntimeOrigin::signed(token_account), target, staking_amount)); - for _n in 0..::MaxUnlockingChunks::get() { + for _n in 0..::MaxUnlockingChunks::get() { assert_ok!(Capacity::unstake( RuntimeOrigin::signed(token_account), target, @@ -158,3 +159,41 @@ fn unstake_errors_not_a_staking_account() { ); }); } + +#[test] +fn unstaking_everything_reaps_staking_account() { + new_test_ext().execute_with(|| { + let staker = 500; + let target = 1; + let amount = 20; + assert_ok!(Capacity::set_epoch_length(RuntimeOrigin::root(), 10)); + + register_provider(target, String::from("WithdrawUnst")); + assert_ok!(Capacity::stake(RuntimeOrigin::signed(staker), target, amount)); + + run_to_block(1); + // unstake everything + assert_ok!(Capacity::unstake(RuntimeOrigin::signed(staker), target, 20)); + assert_eq!(1, Balances::locks(&staker).len()); + assert_eq!(20u64, Balances::locks(&staker)[0].amount); + + // it should reap the staking account right away + assert!(Capacity::get_staking_account_for(&staker).is_none()); + }) +} + +#[test] +fn unstake_when_not_staking_to_target_errors() { + new_test_ext().execute_with(|| { + let staker = 500; + let target = 1; + let amount = 20; + register_provider(target, String::from("WithdrawUnst")); + + assert_ok!(Capacity::stake(RuntimeOrigin::signed(staker), target, amount)); + assert_noop!( + 
Capacity::unstake(RuntimeOrigin::signed(staker), 2, 20), + Error::::StakerTargetRelationshipNotFound + ); + }) +} diff --git a/pallets/capacity/src/tests/withdraw_unstaked_tests.rs b/pallets/capacity/src/tests/withdraw_unstaked_tests.rs index c8356b86e7..5e37d86e34 100644 --- a/pallets/capacity/src/tests/withdraw_unstaked_tests.rs +++ b/pallets/capacity/src/tests/withdraw_unstaked_tests.rs @@ -1,39 +1,31 @@ -use super::{mock::*, testing_utils::run_to_block}; -use crate as pallet_capacity; -use crate::StakingAccountDetails; +use super::{ + mock::*, + testing_utils::{register_provider, run_to_block}, +}; +use crate::{ + unlock_chunks_from_vec, CurrentEpoch, CurrentEpochInfo, EpochInfo, Error, Event, UnstakeUnlocks, +}; use frame_support::{assert_noop, assert_ok}; -use pallet_capacity::{BalanceOf, Error, Event}; #[test] fn withdraw_unstaked_happy_path() { new_test_ext().execute_with(|| { - // set up staker and staking account let staker = 500; - // set new unlock chunks using tuples of (value, thaw_at in number of Epochs) - let unlocks: Vec<(u32, u32)> = vec![(1u32, 2u32), (2u32, 3u32), (3u32, 4u32)]; - - // setup_staking_account_for::(staker, staking_amount, &unlocks); - let mut staking_account = StakingAccountDetails::::default(); - - // we have 10 total staked, and 6 of those are unstaking. - staking_account.deposit(10); - assert_eq!(true, staking_account.set_unlock_chunks(&unlocks)); - assert_eq!(10u64, staking_account.total); - Capacity::set_staking_account(&staker, &staking_account.into()); - - let starting_account = Capacity::get_staking_account_for(&staker).unwrap(); - + CurrentEpoch::::set(0); + CurrentEpochInfo::::set(EpochInfo { epoch_start: 0 }); assert_ok!(Capacity::set_epoch_length(RuntimeOrigin::root(), 10)); + // set new unlock chunks using tuples of (value, thaw_at in number of Epochs) + let new_unlocks: Vec<(u32, u32)> = vec![(1u32, 2u32), (2u32, 3u32), (3u32, 4u32)]; + let unlocking = unlock_chunks_from_vec::(&new_unlocks); + UnstakeUnlocks::::set(&staker, Some(unlocking)); + // We want to advance to epoch 3 to unlock the first two sets. 
run_to_block(31); assert_eq!(3u32, Capacity::get_current_epoch()); assert_ok!(Capacity::withdraw_unstaked(RuntimeOrigin::signed(staker))); - let current_account: StakingAccountDetails = - Capacity::get_staking_account_for(&staker).unwrap(); let expected_reaped_value = 3u64; - assert_eq!(starting_account.total - expected_reaped_value, current_account.total); System::assert_last_event( Event::StakeWithdrawn { account: staker, amount: expected_reaped_value }.into(), ); @@ -44,59 +36,63 @@ fn withdraw_unstaked_happy_path() { fn withdraw_unstaked_correctly_sets_new_lock_state() { new_test_ext().execute_with(|| { let staker = 500; - let mut staking_account = StakingAccountDetails::::default(); - staking_account.deposit(10); + let target = 1; + let amount = 20; + assert_ok!(Capacity::set_epoch_length(RuntimeOrigin::root(), 10)); - // set new unlock chunks using tuples of (value, thaw_at) - let new_unlocks: Vec<(u32, u32)> = vec![(1u32, 2u32), (2u32, 3u32), (3u32, 4u32)]; - assert_eq!(true, staking_account.set_unlock_chunks(&new_unlocks)); + register_provider(target, String::from("WithdrawUnst")); + assert_ok!(Capacity::stake(RuntimeOrigin::signed(staker), target, amount)); - Capacity::set_staking_account(&staker, &staking_account); + run_to_block(1); + assert_ok!(Capacity::unstake(RuntimeOrigin::signed(staker), target, 1)); assert_eq!(1, Balances::locks(&staker).len()); - assert_eq!(10u64, Balances::locks(&staker)[0].amount); + assert_eq!(20u64, Balances::locks(&staker)[0].amount); - assert_ok!(Capacity::set_epoch_length(RuntimeOrigin::root(), 10)); + // thaw period in mock is 2 Epochs * 10 blocks = 20 blocks. + run_to_block(21); + assert_ok!(Capacity::unstake(RuntimeOrigin::signed(staker), target, 2)); + assert_ok!(Capacity::withdraw_unstaked(RuntimeOrigin::signed(staker))); + assert_eq!(1, Balances::locks(&staker).len()); + assert_eq!(19u64, Balances::locks(&staker)[0].amount); - // Epoch length = 10, we want to run to epoch 3 - run_to_block(31); + run_to_block(41); + assert_ok!(Capacity::unstake(RuntimeOrigin::signed(staker), target, 3)); assert_ok!(Capacity::withdraw_unstaked(RuntimeOrigin::signed(staker))); + assert_eq!(1, Balances::locks(&staker).len()); + assert_eq!(17u64, Balances::locks(&staker)[0].amount); + run_to_block(61); + assert_ok!(Capacity::withdraw_unstaked(RuntimeOrigin::signed(staker))); assert_eq!(1, Balances::locks(&staker).len()); - assert_eq!(7u64, Balances::locks(&staker)[0].amount); + assert_eq!(14u64, Balances::locks(&staker)[0].amount); }) } #[test] fn withdraw_unstaked_cleans_up_storage_and_removes_all_locks_if_no_stake_left() { new_test_ext().execute_with(|| { - let mut staking_account = StakingAccountDetails::::default(); - let staking_amount: BalanceOf = 10; - staking_account.deposit(staking_amount); - - // set new unlock chunks using tuples of (value, thaw_at) - let new_unlocks: Vec<(u32, u32)> = vec![(10u32, 2u32)]; - assert_eq!(true, staking_account.set_unlock_chunks(&new_unlocks)); - let staker = 500; - Capacity::set_staking_account(&staker, &staking_account); + let target = 1; + let amount = 20; assert_ok!(Capacity::set_epoch_length(RuntimeOrigin::root(), 10)); - // Epoch Length = 10 and UnstakingThawPeriod = 2 (epochs) - run_to_block(30); - assert_ok!(Capacity::withdraw_unstaked(RuntimeOrigin::signed(staker))); - assert!(Capacity::get_staking_account_for(&staker).is_none()); + register_provider(target, String::from("WithdrawUnst")); + assert_ok!(Capacity::stake(RuntimeOrigin::signed(staker), target, amount)); + run_to_block(1); + // unstake everything + 
+        // wait for thaw
+        run_to_block(21);
+        assert_ok!(Capacity::withdraw_unstaked(RuntimeOrigin::signed(staker)));
         assert_eq!(0, Balances::locks(&staker).len());
+        assert!(Capacity::get_unstake_unlocking_for(&staker).is_none());
     })
 }
 
 #[test]
 fn withdraw_unstaked_cannot_withdraw_if_no_unstaking_chunks() {
     new_test_ext().execute_with(|| {
-        let staker = 500;
-        let mut staking_account = StakingAccountDetails::<Test>::default();
-        staking_account.deposit(10);
-        Capacity::set_staking_account(&staker, &staking_account);
         assert_noop!(
             Capacity::withdraw_unstaked(RuntimeOrigin::signed(500)),
             Error::<Test>::NoUnstakedTokensAvailable
@@ -107,29 +103,16 @@ fn withdraw_unstaked_cannot_withdraw_if_no_unstaking_chunks() {
 fn withdraw_unstaked_cannot_withdraw_if_unstaking_chunks_not_thawed() {
     new_test_ext().execute_with(|| {
         let staker = 500;
-        let mut staking_account = StakingAccountDetails::<Test>::default();
-        staking_account.deposit(10);
-
-        // set new unlock chunks using tuples of (value, thaw_at)
-        let new_unlocks: Vec<(u32, u32)> = vec![(1u32, 3u32), (2u32, 40u32), (3u32, 9u32)];
-        assert_eq!(true, staking_account.set_unlock_chunks(&new_unlocks));
-
-        Capacity::set_staking_account(&staker, &staking_account);
+        let target = 1;
+        let amount = 10;
+        assert_ok!(Capacity::set_epoch_length(RuntimeOrigin::root(), 10));
+        register_provider(target, String::from("WithdrawUnst"));
+        assert_ok!(Capacity::stake(RuntimeOrigin::signed(staker), target, amount));
-        run_to_block(2);
+        run_to_block(11);
         assert_noop!(
             Capacity::withdraw_unstaked(RuntimeOrigin::signed(500)),
             Error::<Test>::NoUnstakedTokensAvailable
         );
     })
 }
-
-#[test]
-fn withdraw_unstaked_error_if_not_a_staking_account() {
-    new_test_ext().execute_with(|| {
-        assert_noop!(
-            Capacity::withdraw_unstaked(RuntimeOrigin::signed(999)),
-            Error::<Test>::NotAStakingAccount
-        );
-    })
-}
diff --git a/pallets/capacity/src/types.rs b/pallets/capacity/src/types.rs
index 72f5168cb8..80dc35c396 100644
--- a/pallets/capacity/src/types.rs
+++ b/pallets/capacity/src/types.rs
@@ -1,30 +1,39 @@
 //! Types for the Capacity Pallet
 use super::*;
 use frame_support::{BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound};
-use log::warn;
 use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_runtime::{
     traits::{CheckedAdd, CheckedSub, Saturating, Zero},
     RuntimeDebug,
 };
-
 #[cfg(any(feature = "runtime-benchmarks", test))]
 use sp_std::vec::Vec;
 
+#[derive(
+    Clone, Copy, Debug, Decode, Encode, TypeInfo, Eq, MaxEncodedLen, PartialEq, PartialOrd,
+)]
+/// The type of staking a given Staking Account is doing.
+pub enum StakingType {
+    /// Staking account targets Providers for capacity only, no token reward
+    MaximumCapacity,
+    /// Staking account targets Providers and splits reward between capacity to the Provider
+    /// and token for the account holder
+    ProviderBoost,
+}
+
 /// The type used for storing information about staking details.
 #[derive(
     TypeInfo, RuntimeDebugNoBound, PartialEqNoBound, EqNoBound, Clone, Decode, Encode, MaxEncodedLen,
 )]
 #[scale_info(skip_type_params(T))]
-pub struct StakingAccountDetails<T: Config> {
+pub struct StakingDetails<T: Config> {
     /// The amount a Staker has staked, minus the sum of all tokens in `unlocking`.
     pub active: BalanceOf<T>,
-    /// The total amount of tokens in `active` and `unlocking`
-    pub total: BalanceOf<T>,
-    /// Unstaked balances that are thawing or awaiting withdrawal.
-    pub unlocking: BoundedVec<UnlockChunk<BalanceOf<T>, T::EpochNumber>, T::MaxUnlockingChunks>,
+    /// The type of staking for this staking account
+    pub staking_type: StakingType,
 }
+
 /// The type that is used to record a single request for a number of tokens to be unlocked.
 #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
 pub struct UnlockChunk<Balance, EpochNumber> {
@@ -34,79 +43,17 @@ pub struct UnlockChunk<Balance, EpochNumber> {
     pub thaw_at: EpochNumber,
 }
 
-impl<T: Config> StakingAccountDetails<T> {
+impl<T: Config> StakingDetails<T> {
     /// Increases total and active balances by an amount.
     pub fn deposit(&mut self, amount: BalanceOf<T>) -> Option<()> {
-        self.total = amount.checked_add(&self.total)?;
         self.active = amount.checked_add(&self.active)?;
-
         Some(())
     }
-    /// Calculates a stakable amount from a proposed amount.
-    pub fn get_stakable_amount_for(
-        &self,
-        staker: &T::AccountId,
-        proposed_amount: BalanceOf<T>,
-    ) -> BalanceOf<T> {
-        let account_balance = T::Currency::free_balance(&staker);
-        let available_staking_balance = account_balance.saturating_sub(self.total);
-        available_staking_balance
-            .saturating_sub(T::MinimumTokenBalance::get())
-            .min(proposed_amount)
-    }
-
-    #[cfg(any(feature = "runtime-benchmarks", test))]
-    #[allow(clippy::unwrap_used)]
-    /// tmp fn for testing only
-    /// set unlock chunks with (balance, thaw_at). does not check that the unlock chunks
-    /// don't exceed total.
-    /// returns true on success, false on failure (?)
-    pub fn set_unlock_chunks(&mut self, chunks: &Vec<(u32, u32)>) -> bool {
-        let result: Vec<UnlockChunk<BalanceOf<T>, <T as Config>::EpochNumber>> = chunks
-            .into_iter()
-            .map(|chunk| UnlockChunk { value: chunk.0.into(), thaw_at: chunk.1.into() })
-            .collect();
-        self.unlocking = BoundedVec::try_from(result).unwrap();
-        self.unlocking.len() == chunks.len()
-    }
-
-    /// deletes thawed chunks, updates `total`, Caller is responsible for updating free/locked
-    /// balance on the token account.
-    /// Returns: the total amount reaped from `unlocking`
-    pub fn reap_thawed(&mut self, current_epoch: <T as Config>::EpochNumber) -> BalanceOf<T> {
-        let mut total_reaped: BalanceOf<T> = 0u32.into();
-        self.unlocking.retain(|chunk| {
-            if current_epoch.ge(&chunk.thaw_at) {
-                total_reaped = total_reaped.saturating_add(chunk.value);
-                match self.total.checked_sub(&chunk.value) {
-                    Some(new_total) => self.total = new_total,
-                    None => warn!(
-                        "Underflow when subtracting {:?} from staking total {:?}",
-                        chunk.value, self.total
-                    ),
-                }
-                false
-            } else {
-                true
-            }
-        });
-        total_reaped
-    }
-    /// Decrease the amount of active stake by an amount and create an UnlockChunk.
-    pub fn withdraw(
-        &mut self,
-        amount: BalanceOf<T>,
-        thaw_at: T::EpochNumber,
-    ) -> Result<BalanceOf<T>, DispatchError> {
-        // let's check for an early exit before doing all these calcs
-        ensure!(
-            self.unlocking.len() < T::MaxUnlockingChunks::get() as usize,
-            Error::<T>::MaxUnlockingChunksExceeded
-        );
-
+    pub fn withdraw(&mut self, amount: BalanceOf<T>) -> Result<BalanceOf<T>, DispatchError> {
         let current_active = self.active;
+        let mut new_active = self.active.saturating_sub(amount);
         let mut actual_unstaked: BalanceOf<T> = amount;
@@ -114,21 +61,15 @@ impl<T: Config> StakingAccountDetails<T> {
             actual_unstaked = current_active;
             new_active = Zero::zero();
         }
-        let unlock_chunk = UnlockChunk { value: actual_unstaked, thaw_at };
-
-        // we've already done the check but it's fine, we need to handle possible errors.
-        self.unlocking
-            .try_push(unlock_chunk)
-            .map_err(|_| Error::<T>::MaxUnlockingChunksExceeded)?;
         self.active = new_active;
         Ok(actual_unstaked)
     }
 }
 
-impl<T: Config> Default for StakingAccountDetails<T> {
+impl<T: Config> Default for StakingDetails<T> {
     fn default() -> Self {
-        Self { active: Zero::zero(), total: Zero::zero(), unlocking: Default::default() }
+        Self { active: Zero::zero(), staking_type: StakingType::MaximumCapacity }
     }
 }
@@ -237,3 +178,48 @@ pub struct EpochInfo {
     /// The block number when this epoch started.
     pub epoch_start: BlockNumber,
 }
+
+/// A BoundedVec containing UnlockChunks
+pub type UnlockChunkList<T> = BoundedVec<
+    UnlockChunk<BalanceOf<T>, <T as Config>::EpochNumber>,
+    <T as Config>::MaxUnlockingChunks,
+>;
+
+/// Computes and returns the total token held in an UnlockChunkList.
+pub fn unlock_chunks_total<T: Config>(unlock_chunks: &UnlockChunkList<T>) -> BalanceOf<T> {
+    unlock_chunks
+        .iter()
+        .fold(Zero::zero(), |acc: BalanceOf<T>, chunk| acc.saturating_add(chunk.value))
+}
+
+/// Deletes thawed chunks
+/// Caller is responsible for updating free/locked balance on the token account.
+/// Returns: the total amount reaped from `unlocking`
+pub fn unlock_chunks_reap_thawed<T: Config>(
+    unlock_chunks: &mut UnlockChunkList<T>,
+    current_epoch: <T as Config>::EpochNumber,
+) -> BalanceOf<T> {
+    let mut total_reaped: BalanceOf<T> = 0u32.into();
+    unlock_chunks.retain(|chunk| {
+        if current_epoch.ge(&chunk.thaw_at) {
+            total_reaped = total_reaped.saturating_add(chunk.value);
+            false
+        } else {
+            true
+        }
+    });
+    total_reaped
+}
+#[cfg(any(feature = "runtime-benchmarks", test))]
+#[allow(clippy::unwrap_used)]
+/// set unlock chunks with (balance, thaw_at). Does not check BoundedVec limit.
+/// returns true on success, false on failure (?)
+/// For testing and benchmarks ONLY, note possible panic via BoundedVec::try_from + unwrap
+pub fn unlock_chunks_from_vec<T: Config>(chunks: &Vec<(u32, u32)>) -> UnlockChunkList<T> {
+    let result: Vec<UnlockChunk<BalanceOf<T>, <T as Config>::EpochNumber>> = chunks
+        .into_iter()
+        .map(|chunk| UnlockChunk { value: chunk.0.into(), thaw_at: chunk.1.into() })
+        .collect();
+    // CAUTION
+    BoundedVec::try_from(result).unwrap()
+}
diff --git a/pallets/capacity/src/weights.rs b/pallets/capacity/src/weights.rs
index fb7f024db5..c5d7ed84e5 100644
--- a/pallets/capacity/src/weights.rs
+++ b/pallets/capacity/src/weights.rs
@@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_capacity
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-11-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz`
+//! HOSTNAME: `benchmark-runner-44wtw-5slfv`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz`
 //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024
 // Executed Command:
@@ -62,11 +62,13 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
     /// Storage: `Msa::ProviderToRegistryEntry` (r:1 w:0)
     /// Proof: `Msa::ProviderToRegistryEntry` (`max_values`: None, `max_size`: Some(33), added: 2508, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::StakingAccountLedger` (r:1 w:1)
-    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(153), added: 2628, mode: `MaxEncodedLen`)
+    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::StakingTargetLedger` (r:1 w:1)
     /// Proof: `Capacity::StakingTargetLedger` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::CapacityLedger` (r:1 w:1)
     /// Proof: `Capacity::CapacityLedger` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::UnstakeUnlocks` (r:1 w:0)
+    /// Proof: `Capacity::UnstakeUnlocks` (`max_values`: None, `max_size`: Some(121), added: 2596, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Locks` (r:1 w:1)
     /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Freezes` (r:1 w:0)
     /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -75,24 +77,26 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
         // Proof Size summary in bytes:
         // Measured: `223`
         // Estimated: `6249`
-        // Minimum execution time: 43_154_000 picoseconds.
-        Weight::from_parts(44_525_000, 6249)
-            .saturating_add(T::DbWeight::get().reads(6_u64))
+        // Minimum execution time: 44_240_000 picoseconds.
+        Weight::from_parts(45_555_000, 6249)
+            .saturating_add(T::DbWeight::get().reads(7_u64))
             .saturating_add(T::DbWeight::get().writes(4_u64))
     }
-    /// Storage: `Capacity::StakingAccountLedger` (r:1 w:1)
-    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(153), added: 2628, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::UnstakeUnlocks` (r:1 w:1)
+    /// Proof: `Capacity::UnstakeUnlocks` (`max_values`: None, `max_size`: Some(121), added: 2596, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::StakingAccountLedger` (r:1 w:0)
+    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Locks` (r:1 w:1)
     /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Freezes` (r:1 w:0)
     /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
     fn withdraw_unstaked() -> Weight {
         // Proof Size summary in bytes:
-        // Measured: `339`
+        // Measured: `285`
         // Estimated: `6249`
-        // Minimum execution time: 33_031_000 picoseconds.
-        Weight::from_parts(34_284_000, 6249)
-            .saturating_add(T::DbWeight::get().reads(3_u64))
+        // Minimum execution time: 28_294_000 picoseconds.
+        Weight::from_parts(29_114_000, 6249)
+            .saturating_add(T::DbWeight::get().reads(4_u64))
             .saturating_add(T::DbWeight::get().writes(2_u64))
     }
     /// Storage: `Capacity::CurrentEpochInfo` (r:1 w:1)
@@ -103,28 +107,26 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `2974`
-        // Minimum execution time: 3_876_000 picoseconds.
-        Weight::from_parts(4_036_000, 2974)
+        // Minimum execution time: 3_912_000 picoseconds.
+        Weight::from_parts(4_081_000, 2974)
             .saturating_add(T::DbWeight::get().reads(2_u64))
             .saturating_add(T::DbWeight::get().writes(1_u64))
     }
     /// Storage: `Capacity::StakingAccountLedger` (r:1 w:1)
-    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(153), added: 2628, mode: `MaxEncodedLen`)
-    /// Storage: `Balances::Locks` (r:1 w:1)
-    /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-    /// Storage: `Balances::Freezes` (r:1 w:0)
-    /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::UnstakeUnlocks` (r:1 w:1)
+    /// Proof: `Capacity::UnstakeUnlocks` (`max_values`: None, `max_size`: Some(121), added: 2596, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::StakingTargetLedger` (r:1 w:1)
     /// Proof: `Capacity::StakingTargetLedger` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::CapacityLedger` (r:1 w:1)
     /// Proof: `Capacity::CapacityLedger` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
     fn unstake() -> Weight {
         // Proof Size summary in bytes:
-        // Measured: `433`
-        // Estimated: `6249`
-        // Minimum execution time: 38_465_000 picoseconds.
-        Weight::from_parts(39_656_000, 6249)
-            .saturating_add(T::DbWeight::get().reads(5_u64))
+        // Measured: `271`
+        // Estimated: `5071`
+        // Minimum execution time: 25_110_000 picoseconds.
+        Weight::from_parts(25_886_000, 5071)
+            .saturating_add(T::DbWeight::get().reads(4_u64))
             .saturating_add(T::DbWeight::get().writes(4_u64))
     }
     /// Storage: `Capacity::EpochLength` (r:0 w:1)
@@ -133,8 +135,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 6_805_000 picoseconds.
-        Weight::from_parts(7_569_000, 0)
+        // Minimum execution time: 6_335_000 picoseconds.
+        Weight::from_parts(6_662_000, 0)
             .saturating_add(T::DbWeight::get().writes(1_u64))
     }
 }
@@ -144,11 +146,13 @@ impl WeightInfo for () {
     /// Storage: `Msa::ProviderToRegistryEntry` (r:1 w:0)
     /// Proof: `Msa::ProviderToRegistryEntry` (`max_values`: None, `max_size`: Some(33), added: 2508, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::StakingAccountLedger` (r:1 w:1)
-    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(153), added: 2628, mode: `MaxEncodedLen`)
+    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::StakingTargetLedger` (r:1 w:1)
     /// Proof: `Capacity::StakingTargetLedger` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::CapacityLedger` (r:1 w:1)
     /// Proof: `Capacity::CapacityLedger` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::UnstakeUnlocks` (r:1 w:0)
+    /// Proof: `Capacity::UnstakeUnlocks` (`max_values`: None, `max_size`: Some(121), added: 2596, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Locks` (r:1 w:1)
     /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Freezes` (r:1 w:0)
     /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -157,24 +161,26 @@ impl WeightInfo for () {
         // Proof Size summary in bytes:
         // Measured: `223`
         // Estimated: `6249`
-        // Minimum execution time: 43_154_000 picoseconds.
-        Weight::from_parts(44_525_000, 6249)
-            .saturating_add(RocksDbWeight::get().reads(6_u64))
+        // Minimum execution time: 44_240_000 picoseconds.
+        Weight::from_parts(45_555_000, 6249)
+            .saturating_add(RocksDbWeight::get().reads(7_u64))
             .saturating_add(RocksDbWeight::get().writes(4_u64))
     }
-    /// Storage: `Capacity::StakingAccountLedger` (r:1 w:1)
-    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(153), added: 2628, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::UnstakeUnlocks` (r:1 w:1)
+    /// Proof: `Capacity::UnstakeUnlocks` (`max_values`: None, `max_size`: Some(121), added: 2596, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::StakingAccountLedger` (r:1 w:0)
+    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Locks` (r:1 w:1)
     /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
     /// Storage: `Balances::Freezes` (r:1 w:0)
     /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
     fn withdraw_unstaked() -> Weight {
         // Proof Size summary in bytes:
-        // Measured: `339`
+        // Measured: `285`
        // Estimated: `6249`
-        // Minimum execution time: 33_031_000 picoseconds.
-        Weight::from_parts(34_284_000, 6249)
-            .saturating_add(RocksDbWeight::get().reads(3_u64))
+        // Minimum execution time: 28_294_000 picoseconds.
+        Weight::from_parts(29_114_000, 6249)
+            .saturating_add(RocksDbWeight::get().reads(4_u64))
             .saturating_add(RocksDbWeight::get().writes(2_u64))
     }
     /// Storage: `Capacity::CurrentEpochInfo` (r:1 w:1)
@@ -185,28 +191,26 @@ impl WeightInfo for () {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `2974`
-        // Minimum execution time: 3_876_000 picoseconds.
-        Weight::from_parts(4_036_000, 2974)
+        // Minimum execution time: 3_912_000 picoseconds.
+        Weight::from_parts(4_081_000, 2974)
             .saturating_add(RocksDbWeight::get().reads(2_u64))
             .saturating_add(RocksDbWeight::get().writes(1_u64))
     }
     /// Storage: `Capacity::StakingAccountLedger` (r:1 w:1)
-    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(153), added: 2628, mode: `MaxEncodedLen`)
-    /// Storage: `Balances::Locks` (r:1 w:1)
-    /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-    /// Storage: `Balances::Freezes` (r:1 w:0)
-    /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+    /// Proof: `Capacity::StakingAccountLedger` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
+    /// Storage: `Capacity::UnstakeUnlocks` (r:1 w:1)
+    /// Proof: `Capacity::UnstakeUnlocks` (`max_values`: None, `max_size`: Some(121), added: 2596, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::StakingTargetLedger` (r:1 w:1)
     /// Proof: `Capacity::StakingTargetLedger` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
     /// Storage: `Capacity::CapacityLedger` (r:1 w:1)
     /// Proof: `Capacity::CapacityLedger` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
     fn unstake() -> Weight {
         // Proof Size summary in bytes:
-        // Measured: `433`
-        // Estimated: `6249`
-        // Minimum execution time: 38_465_000 picoseconds.
-        Weight::from_parts(39_656_000, 6249)
-            .saturating_add(RocksDbWeight::get().reads(5_u64))
+        // Measured: `271`
+        // Estimated: `5071`
+        // Minimum execution time: 25_110_000 picoseconds.
+        Weight::from_parts(25_886_000, 5071)
+            .saturating_add(RocksDbWeight::get().reads(4_u64))
             .saturating_add(RocksDbWeight::get().writes(4_u64))
     }
     /// Storage: `Capacity::EpochLength` (r:0 w:1)
@@ -215,8 +219,8 @@ impl WeightInfo for () {
         // Proof Size summary in bytes:
         // Measured: `0`
         // Estimated: `0`
-        // Minimum execution time: 6_805_000 picoseconds.
-        Weight::from_parts(7_569_000, 0)
+        // Minimum execution time: 6_335_000 picoseconds.
+        Weight::from_parts(6_662_000, 0)
             .saturating_add(RocksDbWeight::get().writes(1_u64))
     }
 }
diff --git a/runtime/frequency/src/lib.rs b/runtime/frequency/src/lib.rs
index 1af8603e70..f553726c91 100644
--- a/runtime/frequency/src/lib.rs
+++ b/runtime/frequency/src/lib.rs
@@ -223,6 +223,7 @@ pub type Executive = frame_executive::Executive<
 	(
 		pallet_messages::migration::v2::MigrateToV2<Runtime>,
 		pallet_schemas::migration::v2::MigrateToV2<Runtime>,
+		pallet_capacity::migration::v2::MigrateToV2<Runtime>,
 	),
 >;
 
@@ -261,7 +262,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("frequency"),
 	impl_name: create_runtime_str!("frequency"),
 	authoring_version: 1,
-	spec_version: 64,
+	spec_version: 65,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
@@ -275,7 +276,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("frequency-rococo"),
 	impl_name: create_runtime_str!("frequency"),
 	authoring_version: 1,
-	spec_version: 64,
+	spec_version: 65,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
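The hunk above only wires the new capacity migration into the runtime's `Executive` tuple and bumps `spec_version` to 65; the migration body itself lives in `pallets/capacity/src/migration/v2.rs`, which is outside this excerpt. For orientation, a single-block `OnRuntimeUpgrade` for this storage change could look roughly like the sketch below. It is an illustration only, built on assumptions: that the old ledger value decodes as `(active, total, unlocking)`, that the relevant maps are the `StakingAccountLedger` and `UnstakeUnlocks` items named in the weight annotations, and that the weight is a plain read/write count. The shipped code may differ.

```rust
// Illustration only; not the code shipped in pallets/capacity/src/migration/v2.rs.
use frame_support::{
    pallet_prelude::Weight,
    traits::{Get, GetStorageVersion, OnRuntimeUpgrade, StorageVersion},
};
use sp_std::marker::PhantomData;

use crate::{
    BalanceOf, Config, Pallet, StakingAccountLedger, StakingDetails, StakingType, UnlockChunkList,
    UnstakeUnlocks,
};

/// Assumed SCALE layout of the pre-v2 ledger value: (active, total, unlocking).
type OldLedgerEntry<T> = (BalanceOf<T>, BalanceOf<T>, UnlockChunkList<T>);

pub struct MigrateToV2<T>(PhantomData<T>);

impl<T: Config> OnRuntimeUpgrade for MigrateToV2<T> {
    fn on_runtime_upgrade() -> Weight {
        // Single-block, run-once migration: bail out if storage is already at v2.
        if Pallet::<T>::on_chain_storage_version() >= StorageVersion::new(2) {
            return T::DbWeight::get().reads(1);
        }

        let mut migrated: u64 = 0;
        // Re-encode each old record as the slimmer `StakingDetails` and park the unlock
        // chunks in `UnstakeUnlocks`, so `withdraw_unstaked` no longer has to read the
        // whole ledger entry.
        StakingAccountLedger::<T>::translate(
            |who: T::AccountId, (active, _total, unlocking): OldLedgerEntry<T>| {
                migrated = migrated.saturating_add(1);
                if !unlocking.is_empty() {
                    UnstakeUnlocks::<T>::insert(&who, unlocking);
                }
                Some(StakingDetails::<T> { active, staking_type: StakingType::MaximumCapacity })
            },
        );

        StorageVersion::new(2).put::<Pallet<T>>();
        // One read plus up to two writes per migrated entry, plus the version write.
        T::DbWeight::get()
            .reads_writes(migrated.saturating_add(1), migrated.saturating_mul(2).saturating_add(1))
    }
}
```

`Executive` runs each entry of the migration tuple once per runtime upgrade, so a storage-version guard like the one above is what keeps a re-applied upgrade from translating the map twice.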