From 121eba7ad776be5a20ef03f17cfc7c7e709d184f Mon Sep 17 00:00:00 2001
From: Aramik
Date: Tue, 5 Dec 2023 10:37:12 -0800
Subject: [PATCH 1/3] adding schema names support (#1784)

# Goal
The goal of this PR is to add support for schema names and versions.
Closes #1693

# Design
The names are stored in a map in which the name is the key and points to the list of the different versions and their schema ids for that name.

![SchemaNames drawio](https://github.com/LibertyDSNP/frequency/assets/9152501/e4ccea6c-df6a-48fd-bd4a-f6148e0a620b)

# What is added?
- Added new storage `SchemaNameToIds`, which allows getting schema ids and versions from a schema name
- Added new `create_schema_v3`, `propose_to_create_schema_v2`, and `create_schema_via_governance_v2` extrinsics, which support adding an optional name during creation
- Added new `propose_to_create_schema_name` and `create_schema_name_via_governance` extrinsics to assign a name to a schema that does not yet have one
- Added a migration to assign dsnp schema names to well-known schemas on rococo and mainnet
- Added an RPC which returns the versions and schema ids for a given schema name or namespace

# Name limitations
- Only the ASCII characters [a-z], [A-Z], and `-` are supported
- The name format is `namespace.descriptor`
- Namespace minimum size is 3 characters
- Descriptor minimum size is 1 character
- Name maximum size is 32 characters
- Namespace must not start or end with `-`
- Descriptor must not start or end with `-`

# Checklist
- [x] Chain spec updated
- [X] Custom RPC OR Runtime API added/changed? Updated js/api-augment.
- [X] Tests added
- [X] Benchmarks added
- [X] Weights updated

---------

Co-authored-by: Frequency CI [bot]
---
 common/primitives/src/schema.rs | 16 +
 e2e/package-lock.json | 2 +-
 e2e/scaffolding/extrinsicHelpers.ts | 47 +
 e2e/scaffolding/helpers.ts | 17 +
 e2e/schemas/createSchema.test.ts | 80 +-
 e2e/sudo/sudo.test.ts | 26 +
 js/api-augment/definitions/schemas.ts | 28 +-
 pallets/schemas/src/benchmarking.rs | 81 +
 pallets/schemas/src/lib.rs | 371 ++++-
 pallets/schemas/src/migration/mod.rs | 4 +-
 pallets/schemas/src/migration/v3.rs | 179 +++
 pallets/schemas/src/rpc/src/lib.rs | 11 +
 pallets/schemas/src/rpc/src/tests/mod.rs | 37 +
 pallets/schemas/src/runtime-api/src/lib.rs | 5 +-
 pallets/schemas/src/tests/deprecated_tests.rs | 470 ++++++
 pallets/schemas/src/tests/migrations_tests.rs | 52 +
 pallets/schemas/src/tests/mock.rs | 22 +
 pallets/schemas/src/tests/mod.rs | 2 +
 pallets/schemas/src/tests/other_tests.rs | 1348 ++++++++++-------
 pallets/schemas/src/types.rs | 161 +-
 pallets/schemas/src/weights.rs | 289 +++-
 runtime/frequency/src/lib.rs | 20 +-
 22 files changed, 2664 insertions(+), 604 deletions(-)
 create mode 100644 pallets/schemas/src/migration/v3.rs
 create mode 100644 pallets/schemas/src/tests/deprecated_tests.rs
 create mode 100644 pallets/schemas/src/tests/migrations_tests.rs

diff --git a/common/primitives/src/schema.rs b/common/primitives/src/schema.rs
index 6d765f5cb1..2516625368 100644
--- a/common/primitives/src/schema.rs
+++ b/common/primitives/src/schema.rs
@@ -14,6 +14,9 @@ use utils::*;
 /// Schema Id is the unique identifier for a Schema
 pub type SchemaId = u16;
 
+/// Schema version number
+pub type SchemaVersion = u8;
+
 /// Types of modeling in which a message payload may be defined
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 #[derive(Copy, Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)]
@@ -130,6 +133,19 @@ impl SchemaSettings {
 }
 impl_codec_bitflags!(SchemaSettings, u16, SchemaSetting);
 
+/// RPC Response from a schema name query
+#[cfg_attr(feature = 
"std", derive(Serialize, Deserialize))] +#[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq)] +pub struct SchemaVersionResponse { + /// Schema name in following format: namespace.descriptor + #[cfg_attr(feature = "std", serde(with = "as_string"))] + pub schema_name: Vec, + /// The version for this schema + pub schema_version: SchemaVersion, + /// The unique identifier for this Schema + pub schema_id: SchemaId, +} + #[cfg(test)] mod tests { use super::*; diff --git a/e2e/package-lock.json b/e2e/package-lock.json index f2b313a7ce..990a81e2e2 100644 --- a/e2e/package-lock.json +++ b/e2e/package-lock.json @@ -260,7 +260,7 @@ "node_modules/@frequency-chain/api-augment": { "version": "0.0.0", "resolved": "file:../js/api-augment/dist/frequency-chain-api-augment-0.0.0.tgz", - "integrity": "sha512-SjELGw36ccBPvWV19CU73HAOU1hiYJfQGqY1G3Qd7MJUuH3EaaB7Qr85dqjKcwIt37L7hYZ29LjddBw9//jRkw==", + "integrity": "sha512-6sgGQboelVknC2F8DE0/rSo79d9r96zeqg7z6el7ZqyMrqX2cQacvgcnHHsqTbTib7W7LTSbpK9t0IbO0vcsRg==", "license": "Apache-2.0", "dependencies": { "@polkadot/api": "^10.9.1", diff --git a/e2e/scaffolding/extrinsicHelpers.ts b/e2e/scaffolding/extrinsicHelpers.ts index 3ab94997b1..b3b4797aa4 100644 --- a/e2e/scaffolding/extrinsicHelpers.ts +++ b/e2e/scaffolding/extrinsicHelpers.ts @@ -353,6 +353,29 @@ export class ExtrinsicHelper { ); } + /** Schema v3 Extrinsics */ + public static createSchemaV3( + keys: KeyringPair, + model: any, + modelType: 'AvroBinary' | 'Parquet', + payloadLocation: 'OnChain' | 'IPFS' | 'Itemized' | 'Paginated', + grant: ('AppendOnly' | 'SignatureRequired')[], + schemaNme: string | null + ) { + return new Extrinsic( + () => + ExtrinsicHelper.api.tx.schemas.createSchemaV3( + JSON.stringify(model), + modelType, + payloadLocation, + grant, + schemaNme + ), + keys, + ExtrinsicHelper.api.events.schemas.SchemaCreated + ); + } + /** Generic Schema Extrinsics */ public static createSchemaWithSettingsGov( keys: KeyringPair, @@ -375,6 +398,30 @@ export class ExtrinsicHelper { ); } + /** Generic Schema Extrinsics v2 */ + public static createSchemaWithSettingsGovV2( + keys: KeyringPair, + model: any, + modelType: 'AvroBinary' | 'Parquet', + payloadLocation: 'OnChain' | 'IPFS' | 'Itemized' | 'Paginated', + grant: 'AppendOnly' | 'SignatureRequired', + schemaName: string | null + ) { + return new Extrinsic( + () => + ExtrinsicHelper.api.tx.schemas.createSchemaViaGovernanceV2( + keys.publicKey, + JSON.stringify(model), + modelType, + payloadLocation, + [grant], + schemaName + ), + keys, + ExtrinsicHelper.api.events.schemas.SchemaCreated + ); + } + /** Get Schema RPC */ public static getSchema(schemaId: u16): Promise> { return ExtrinsicHelper.apiPromise.rpc.schemas.getBySchemaId(schemaId); diff --git a/e2e/scaffolding/helpers.ts b/e2e/scaffolding/helpers.ts index 550403b62c..6916bbd294 100644 --- a/e2e/scaffolding/helpers.ts +++ b/e2e/scaffolding/helpers.ts @@ -534,3 +534,20 @@ export async function assertAddNewKey( assertEvent(eventMap, 'system.ExtrinsicSuccess'); assertEvent(eventMap, 'msa.PublicKeyAdded'); } + +export function generateSchemaPartialName(length: number): string { + let result = ''; + const characters = 'abcdefghijklmnopqrstuvwxyz-'; + const charactersLength = characters.length; + let counter = 0; + while (counter < length) { + const randomChar = characters.charAt(Math.floor(Math.random() * charactersLength)); + if ((counter == 0 || counter == length - 1) && randomChar === '-') { + // avoid creating invalid name + continue; + } + result += randomChar; + counter += 1; + } + 
return result; +} diff --git a/e2e/schemas/createSchema.test.ts b/e2e/schemas/createSchema.test.ts index bd3ee6b86b..2a47475fa5 100644 --- a/e2e/schemas/createSchema.test.ts +++ b/e2e/schemas/createSchema.test.ts @@ -5,7 +5,12 @@ import assert from 'assert'; import { AVRO_GRAPH_CHANGE } from './fixtures/avroGraphChangeSchemaType'; import { KeyringPair } from '@polkadot/keyring/types'; import { ExtrinsicHelper } from '../scaffolding/extrinsicHelpers'; -import { createKeys, createAndFundKeypair, assertExtrinsicSuccess } from '../scaffolding/helpers'; +import { + createKeys, + createAndFundKeypair, + assertExtrinsicSuccess, + generateSchemaPartialName, +} from '../scaffolding/helpers'; import { getFundingSource } from '../scaffolding/funding'; const fundingSource = getFundingSource('schemas-create'); @@ -128,4 +133,77 @@ describe('#createSchema', function () { assertExtrinsicSuccess(eventMap); assert.notEqual(createSchemaEvent, undefined); }); + + it('should successfully create a schema v3 with name', async function () { + const schemaName = 'e-e.' + generateSchemaPartialName(20); + const f = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], schemaName); + const { target: createSchemaEvent, eventMap } = await f.fundAndSend(fundingSource); + + assertExtrinsicSuccess(eventMap); + assert.notEqual(createSchemaEvent, undefined); + assert.notEqual(eventMap['schemas.SchemaNameCreated'], undefined); + }); + + it('should successfully create a schema v3 without a name', async function () { + const f = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], null); + const { target: createSchemaEvent, eventMap } = await f.fundAndSend(fundingSource); + + assertExtrinsicSuccess(eventMap); + assert.notEqual(createSchemaEvent, undefined); + assert.equal(eventMap['schemas.SchemaNameCreated'], undefined); + }); + + it('should fail to create schema with invalid character in name v3', async function () { + const f = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], 'test2.invalid'); + await assert.rejects(f.fundAndSend(fundingSource), { + name: 'InvalidSchemaNameCharacters', + }); + }); + + it('should fail to create schema with invalid name structure v3', async function () { + const f = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], 'test'); + await assert.rejects(f.fundAndSend(fundingSource), { + name: 'InvalidSchemaNameStructure', + }); + }); + + it('should fail to create schema with invalid name encoding v3', async function () { + const f = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], 'ñòò.ò'); + await assert.rejects(f.fundAndSend(fundingSource), { + name: 'InvalidSchemaNameEncoding', + }); + }); + + it('should fail to create schema with invalid namespace length v3', async function () { + const f = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], 'a.b'); + await assert.rejects(f.fundAndSend(fundingSource), { + name: 'InvalidSchemaNamespaceLength', + }); + }); + + it('get version rpc should return all schemas using the same name', async function () { + const namespace = generateSchemaPartialName(20); + const aliceSchemaName = namespace + '.alice'; + const bobSchemaName = namespace + '.bob'; + const f = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], aliceSchemaName); + const { target: createSchemaEvent, eventMap } = await f.fundAndSend(fundingSource); + + 
assertExtrinsicSuccess(eventMap); + assert.notEqual(createSchemaEvent, undefined); + assert.notEqual(eventMap['schemas.SchemaNameCreated'], undefined); + + const f2 = ExtrinsicHelper.createSchemaV3(keys, AVRO_GRAPH_CHANGE, 'AvroBinary', 'OnChain', [], bobSchemaName); + const { target: createSchemaEvent2, eventMap: eventMap2 } = await f2.fundAndSend(fundingSource); + + assertExtrinsicSuccess(eventMap2); + assert.notEqual(createSchemaEvent2, undefined); + assert.notEqual(eventMap2['schemas.SchemaNameCreated'], undefined); + + const versions = await ExtrinsicHelper.apiPromise.rpc.schemas.getVersions(namespace); + assert(versions.isSome); + const versions_response_value = versions.unwrap(); + assert.equal(versions_response_value.length, 2); + assert(versions_response_value.toArray().some((v) => v.schema_name == aliceSchemaName)); + assert(versions_response_value.toArray().some((v) => v.schema_name == bobSchemaName)); + }); }); diff --git a/e2e/sudo/sudo.test.ts b/e2e/sudo/sudo.test.ts index 4718814282..f5aa4fdbd8 100644 --- a/e2e/sudo/sudo.test.ts +++ b/e2e/sudo/sudo.test.ts @@ -14,6 +14,7 @@ import { createDelegatorAndDelegation, createProviderKeysAndId, getCurrentItemizedHash, + generateSchemaPartialName, } from '../scaffolding/helpers'; import { AVRO_CHAT_MESSAGE } from '../stateful-pallet-storage/fixtures/itemizedSchemaType'; @@ -49,6 +50,31 @@ describe('Sudo required', function () { }); }); + describe('schema-pallet ', function () { + it('should create schema with name using createSchemaWithSettingsGovV2', async function () { + if (isTestnet()) this.skip(); + const schemaName = 'e-e.sudo-' + generateSchemaPartialName(15); + const createSchema = ExtrinsicHelper.createSchemaWithSettingsGovV2( + sudoKey, + AVRO_GRAPH_CHANGE, + 'AvroBinary', + 'Itemized', + 'AppendOnly', + schemaName + ); + const { target: event, eventMap } = await createSchema.sudoSignAndSend(); + assert.notEqual(event, undefined); + const itemizedSchemaId: u16 = event?.data.schemaId || new u16(ExtrinsicHelper.api.registry, 0); + assert.notEqual(itemizedSchemaId.toNumber(), 0); + const schema_response = await ExtrinsicHelper.getSchema(itemizedSchemaId); + assert(schema_response.isSome); + const schema_response_value = schema_response.unwrap(); + const schema_settings = schema_response_value.settings; + assert.notEqual(schema_settings.length, 0); + assert.notEqual(eventMap['schemas.SchemaNameCreated'], undefined); + }); + }); + describe('stateful-pallet-storage', function () { it('should fail to create non itemized schema with AppendOnly settings', async function () { if (isTestnet()) this.skip(); diff --git a/js/api-augment/definitions/schemas.ts b/js/api-augment/definitions/schemas.ts index 4f080733aa..0149dacc5b 100644 --- a/js/api-augment/definitions/schemas.ts +++ b/js/api-augment/definitions/schemas.ts @@ -25,10 +25,21 @@ export default { ], type: "bool", }, + getVersions: { + description: "Get different versions and schema ids for a complete schema name or only a namespace", + params: [ + { + name: "schema_name", + type: "String", + }, + ], + type: "Option>", + }, }, types: { SchemaId: "u16", SchemaModel: "Vec", + SchemaVersion: "u8", SchemaResponse: { schema_id: "SchemaId", model: "SchemaModel", @@ -45,6 +56,11 @@ export default { SchemaSetting: { _enum: ["AppendOnly", "SignatureRequired"], }, + SchemaVersionResponse: { + schema_name: "String", + schema_version: "SchemaVersion", + schema_id: "SchemaId", + }, }, runtime: { SchemasRuntimeApi: [ @@ -60,8 +76,18 @@ export default { ], type: "Option", }, + 
get_schema_versions_by_name: { + description: "Fetch the schema versions by name", + params: [ + { + name: "schema_name", + type: "Vec", + }, + ], + type: "Option>", + }, }, - version: 1, + version: 2, }, ], }, diff --git a/pallets/schemas/src/benchmarking.rs b/pallets/schemas/src/benchmarking.rs index 79b1396313..fb6c898a9e 100644 --- a/pallets/schemas/src/benchmarking.rs +++ b/pallets/schemas/src/benchmarking.rs @@ -1,4 +1,5 @@ #![allow(clippy::unwrap_used)] +use common_primitives::schema::SchemaVersion; use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{assert_ok, ensure, BoundedVec}; use frame_system::RawOrigin; @@ -83,6 +84,24 @@ benchmarks! { ensure!(SchemasPallet::::get_schema_info(1).is_some(), "Created schema should exist"); } + create_schema_v3 { + let m in (T::MinSchemaModelSizeBytes::get() + 8) .. (T::SchemaModelMaxBytesBoundedVecLimit::get() - 1); + let sender: T::AccountId = whitelisted_caller(); + let version: SchemaVersion = 1; + let namespace = vec![b'a'; NAMESPACE_MIN as usize]; + let descriptor = vec![b'b'; DESCRIPTOR_MAX as usize]; + let name:Vec= namespace.into_iter().chain(vec![b'.'].into_iter()).chain(descriptor.into_iter()).collect(); + let bounded_name = BoundedVec::try_from(name).expect("should resolve"); + let model_type = ModelType::AvroBinary; + let payload_location = PayloadLocation::OnChain; + assert_ok!(SchemasPallet::::set_max_schema_model_bytes(RawOrigin::Root.into(), T::SchemaModelMaxBytesBoundedVecLimit::get())); + let schema_input = generate_schema::(m as usize); + }: _(RawOrigin::Signed(sender), schema_input, model_type, payload_location, BoundedVec::default(), Some(bounded_name)) + verify { + ensure!(SchemasPallet::::get_current_schema_identifier_maximum() > 0, "Created schema count should be > 0"); + ensure!(SchemasPallet::::get_schema_info(1).is_some(), "Created schema should exist"); + } + set_max_schema_model_bytes { let sender = RawOrigin::Root; let max_size = T::SchemaModelMaxBytesBoundedVecLimit::get(); @@ -91,7 +110,69 @@ benchmarks! { ensure!(SchemasPallet::::get_schema_model_max_bytes() == T::SchemaModelMaxBytesBoundedVecLimit::get(), "Schema model max should be updated!"); } + create_schema_via_governance_v2 { + let m in (T::MinSchemaModelSizeBytes::get() + 8) .. (T::SchemaModelMaxBytesBoundedVecLimit::get() - 1); + let sender: T::AccountId = whitelisted_caller(); + let namespace = vec![b'a'; NAMESPACE_MIN as usize]; + let descriptor = vec![b'b'; DESCRIPTOR_MAX as usize]; + let name:Vec= namespace.into_iter().chain(vec![b'.'].into_iter()).chain(descriptor.into_iter()).collect(); + let bounded_name = BoundedVec::try_from(name).expect("should resolve"); + let model_type = ModelType::AvroBinary; + let payload_location = PayloadLocation::OnChain; + assert_ok!(SchemasPallet::::set_max_schema_model_bytes(RawOrigin::Root.into(), T::SchemaModelMaxBytesBoundedVecLimit::get())); + let schema_input = generate_schema::(m as usize); + }: _(RawOrigin::Root, sender.clone(), schema_input, model_type, payload_location, BoundedVec::default(), Some(bounded_name)) + verify { + ensure!(SchemasPallet::::get_current_schema_identifier_maximum() > 0, "Created schema count should be > 0"); + ensure!(SchemasPallet::::get_schema_info(1).is_some(), "Created schema should exist"); + } + + propose_to_create_schema_v2 { + let m in (T::MinSchemaModelSizeBytes::get() + 8) .. 
(T::SchemaModelMaxBytesBoundedVecLimit::get() - 1); + let sender: T::AccountId = whitelisted_caller(); + let model_type = ModelType::AvroBinary; + let payload_location = PayloadLocation::OnChain; + let namespace = vec![b'a'; NAMESPACE_MIN as usize]; + let descriptor = vec![b'b'; DESCRIPTOR_MAX as usize]; + let name:Vec= namespace.into_iter().chain(vec![b'.'].into_iter()).chain(descriptor.into_iter()).collect(); + let bounded_name = BoundedVec::try_from(name).expect("should resolve"); + assert_ok!(SchemasPallet::::set_max_schema_model_bytes(RawOrigin::Root.into(), T::SchemaModelMaxBytesBoundedVecLimit::get())); + let schema_input = generate_schema::(m as usize); + }: _(RawOrigin::Signed(sender), schema_input, model_type, payload_location, BoundedVec::default(), Some(bounded_name)) + verify { + assert_eq!(T::ProposalProvider::proposal_count(), 1); + } + + propose_to_create_schema_name { + let sender: T::AccountId = whitelisted_caller(); + let schema_id = 1; + let model = generate_schema::(100 as usize); + let namespace = vec![b'a'; NAMESPACE_MIN as usize]; + let descriptor = vec![b'b'; DESCRIPTOR_MAX as usize]; + let name:Vec= namespace.into_iter().chain(vec![b'.'].into_iter()).chain(descriptor.into_iter()).collect(); + let schema_name = SchemaNamePayload::try_from(name).expect("should resolve"); + assert_ok!(SchemasPallet::::set_max_schema_model_bytes(RawOrigin::Root.into(), T::SchemaModelMaxBytesBoundedVecLimit::get())); + assert_ok!(SchemasPallet::::add_schema(model, ModelType::AvroBinary, PayloadLocation::OnChain, BoundedVec::default(), None)); + }: _(RawOrigin::Signed(sender), schema_id, schema_name) + verify { + assert_eq!(T::ProposalProvider::proposal_count(), 1); + } + create_schema_name_via_governance { + let schema_id = 1; + let model = generate_schema::(100 as usize); + let namespace = vec![b'a'; NAMESPACE_MIN as usize]; + let descriptor = vec![b'b'; DESCRIPTOR_MAX as usize]; + let name:Vec= namespace.into_iter().chain(vec![b'.'].into_iter()).chain(descriptor.into_iter()).collect(); + let schema_name = SchemaNamePayload::try_from(name).expect("should resolve"); + assert_ok!(SchemasPallet::::set_max_schema_model_bytes(RawOrigin::Root.into(), T::SchemaModelMaxBytesBoundedVecLimit::get())); + assert_ok!(SchemasPallet::::add_schema(model, ModelType::AvroBinary, PayloadLocation::OnChain, BoundedVec::default(), None)); + }: _(RawOrigin::Root, schema_id, schema_name.clone()) + verify { + let versions = SchemasPallet::::get_schema_versions(schema_name.into_inner()); + ensure!(versions.is_some(), "Created schema name should exist"); + ensure!(versions.unwrap().len() == 1, "Version should be added!"); + } impl_benchmark_test_suite!( SchemasPallet, diff --git a/pallets/schemas/src/lib.rs b/pallets/schemas/src/lib.rs index c0662f0112..23c10e1c7f 100644 --- a/pallets/schemas/src/lib.rs +++ b/pallets/schemas/src/lib.rs @@ -75,7 +75,7 @@ mod tests; mod benchmarking; #[cfg(feature = "runtime-benchmarks")] use common_primitives::benchmarks::SchemaBenchmarkHelper; -use common_primitives::schema::SchemaInfoResponse; +use common_primitives::schema::{SchemaInfoResponse, SchemaVersionResponse}; /// migration module pub mod migration; mod types; @@ -148,6 +148,14 @@ pub mod pallet { /// Max size of schema document max_size: u32, }, + + /// Emitted when a schema is assigned a name + SchemaNameCreated { + /// Schema ID which a name is assigned + schema_id: SchemaId, + /// ASCII string in bytes of the assigned name + name: Vec, + }, } #[derive(PartialEq, Eq)] // for testing @@ -167,6 +175,36 @@ pub mod 
pallet { /// Invalid setting for schema InvalidSetting, + + /// Invalid schema name encoding + InvalidSchemaNameEncoding, + + /// Invalid schema name characters + InvalidSchemaNameCharacters, + + /// Invalid schema name structure + InvalidSchemaNameStructure, + + /// Invalid schema name length + InvalidSchemaNameLength, + + /// Invalid schema namespace length + InvalidSchemaNamespaceLength, + + /// Invalid schema descriptor length + InvalidSchemaDescriptorLength, + + /// Schema version exceeds the maximum allowed number + ExceedsMaxNumberOfVersions, + + /// Inserted schema id already exists + SchemaIdAlreadyExists, + + /// SchemaId does not exist + SchemaIdDoesNotExist, + + /// SchemaId has a name already + SchemaIdAlreadyHasName, } #[pallet::pallet] @@ -209,6 +247,21 @@ pub mod pallet { OptionQuery, >; + /// Storage for message schema info struct data + /// - Key: Schema Id + /// - Value: [`SchemaInfo`](SchemaInfo) + #[pallet::storage] + #[pallet::getter(fn get_schema_ids)] + pub(super) type SchemaNameToIds = StorageDoubleMap< + _, + Blake2_128Concat, + SchemaNamespace, + Blake2_128Concat, + SchemaDescriptor, + SchemaVersionId, + ValueQuery, + >; + #[pallet::genesis_config] pub struct GenesisConfig { /// Maximum schema size in bytes at genesis @@ -251,7 +304,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::create_schema(model.len() as u32))] #[allow(deprecated)] #[deprecated( - note = "please use `create_schema_v2` since `create_schema` has been deprecated." + note = "please use `create_schema_v3` since `create_schema` has been deprecated." )] pub fn create_schema( origin: OriginFor, @@ -261,11 +314,12 @@ pub mod pallet { ) -> DispatchResult { let sender = ensure_signed(origin)?; - let schema_id = Self::create_schema_for( + let (schema_id, _) = Self::create_schema_for( model, model_type, payload_location, BoundedVec::default(), + None, )?; Self::deposit_event(Event::SchemaCreated { key: sender, schema_id }); @@ -304,6 +358,10 @@ pub mod pallet { /// #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::propose_to_create_schema(model.len() as u32))] + #[allow(deprecated)] + #[deprecated( + note = "please use `propose_to_create_schema_v2` since `propose_to_create_schema` has been deprecated." + )] pub fn propose_to_create_schema( origin: OriginFor, model: BoundedVec, @@ -339,6 +397,10 @@ pub mod pallet { /// * [`Error::SchemaCountOverflow`] - The schema count has exceeded its bounds #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::create_schema_via_governance(model.len() as u32+ settings.len() as u32))] + #[allow(deprecated)] + #[deprecated( + note = "please use `create_schema_via_governance_v2` since `create_schema_via_governance` has been deprecated." + )] pub fn create_schema_via_governance( origin: OriginFor, creator_key: T::AccountId, @@ -348,7 +410,8 @@ pub mod pallet { settings: BoundedVec, ) -> DispatchResult { T::CreateSchemaViaGovernanceOrigin::ensure_origin(origin)?; - let schema_id = Self::create_schema_for(model, model_type, payload_location, settings)?; + let (schema_id, _) = + Self::create_schema_for(model, model_type, payload_location, settings, None)?; Self::deposit_event(Event::SchemaCreated { key: creator_key, schema_id }); Ok(()) @@ -372,6 +435,10 @@ pub mod pallet { /// #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::create_schema_v2(model.len() as u32 + settings.len() as u32))] + #[allow(deprecated)] + #[deprecated( + note = "please use `create_schema_v3` since `create_schema_v2` has been deprecated." 
+ )] pub fn create_schema_v2( origin: OriginFor, model: BoundedVec, @@ -381,11 +448,235 @@ pub mod pallet { ) -> DispatchResult { let sender = ensure_signed(origin)?; - let schema_id = Self::create_schema_for(model, model_type, payload_location, settings)?; + let (schema_id, _) = + Self::create_schema_for(model, model_type, payload_location, settings, None)?; Self::deposit_event(Event::SchemaCreated { key: sender, schema_id }); Ok(()) } + + /// Propose to create a schema. Creates a proposal for council approval to create a schema + /// + #[pallet::call_index(5)] + #[pallet::weight( + match schema_name { + Some(_) => T::WeightInfo::propose_to_create_schema_v2(model.len() as u32), + None => T::WeightInfo::propose_to_create_schema(model.len() as u32) + } + )] + pub fn propose_to_create_schema_v2( + origin: OriginFor, + model: BoundedVec, + model_type: ModelType, + payload_location: PayloadLocation, + settings: BoundedVec, + schema_name: Option, + ) -> DispatchResult { + let proposer = ensure_signed(origin)?; + + let proposal: Box = Box::new( + (Call::::create_schema_via_governance_v2 { + creator_key: proposer.clone(), + model, + model_type, + payload_location, + settings, + schema_name, + }) + .into(), + ); + T::ProposalProvider::propose_with_simple_majority(proposer, proposal)?; + Ok(()) + } + + /// Create a schema by means of council approval + /// + /// # Events + /// * [`Event::SchemaCreated`] + /// * [`Event::SchemaNameCreated`] + /// + /// # Errors + /// * [`Error::LessThanMinSchemaModelBytes`] - The schema's length is less than the minimum schema length + /// * [`Error::ExceedsMaxSchemaModelBytes`] - The schema's length is greater than the maximum schema length + /// * [`Error::InvalidSchema`] - Schema is malformed in some way + /// * [`Error::SchemaCountOverflow`] - The schema count has exceeded its bounds + /// * [`Error::InvalidSchemaNameEncoding`] - The schema name has invalid encoding + /// * [`Error::InvalidSchemaNameCharacters`] - The schema name has invalid characters + /// * [`Error::InvalidSchemaNameStructure`] - The schema name has invalid structure + /// * [`Error::InvalidSchemaNameLength`] - The schema name has invalid length + /// * [`Error::InvalidSchemaNamespaceLength`] - The schema namespace has invalid length + /// * [`Error::InvalidSchemaDescriptorLength`] - The schema descriptor has invalid length + /// * [`Error::ExceedsMaxNumberOfVersions`] - The schema name reached max number of versions + /// + #[pallet::call_index(6)] + #[pallet::weight( + match schema_name { + Some(_) => T::WeightInfo::create_schema_via_governance_v2(model.len() as u32+ settings.len() as u32), + None => T::WeightInfo::create_schema_via_governance(model.len() as u32+ settings.len() as u32) + } + )] + pub fn create_schema_via_governance_v2( + origin: OriginFor, + creator_key: T::AccountId, + model: BoundedVec, + model_type: ModelType, + payload_location: PayloadLocation, + settings: BoundedVec, + schema_name: Option, + ) -> DispatchResult { + T::CreateSchemaViaGovernanceOrigin::ensure_origin(origin)?; + let (schema_id, schema_name) = Self::create_schema_for( + model, + model_type, + payload_location, + settings, + schema_name, + )?; + + Self::deposit_event(Event::SchemaCreated { key: creator_key, schema_id }); + if let Some(inner_name) = schema_name { + Self::deposit_event(Event::SchemaNameCreated { + schema_id, + name: inner_name.get_combined_name(), + }); + } + Ok(()) + } + + /// Adds a given schema to storage. 
The schema in question must be of length + /// between the min and max model size allowed for schemas (see pallet + /// constants above). If the pallet's maximum schema limit has been + /// fulfilled by the time this extrinsic is called, a SchemaCountOverflow error + /// will be thrown. + /// + /// # Events + /// * [`Event::SchemaCreated`] + /// * [`Event::SchemaNameCreated`] + /// + /// # Errors + /// * [`Error::LessThanMinSchemaModelBytes`] - The schema's length is less than the minimum schema length + /// * [`Error::ExceedsMaxSchemaModelBytes`] - The schema's length is greater than the maximum schema length + /// * [`Error::InvalidSchema`] - Schema is malformed in some way + /// * [`Error::SchemaCountOverflow`] - The schema count has exceeded its bounds + /// * [`Error::InvalidSetting`] - Invalid setting is provided + /// * [`Error::InvalidSchemaNameEncoding`] - The schema name has invalid encoding + /// * [`Error::InvalidSchemaNameCharacters`] - The schema name has invalid characters + /// * [`Error::InvalidSchemaNameStructure`] - The schema name has invalid structure + /// * [`Error::InvalidSchemaNameLength`] - The schema name has invalid length + /// * [`Error::InvalidSchemaNamespaceLength`] - The schema namespace has invalid length + /// * [`Error::InvalidSchemaDescriptorLength`] - The schema descriptor has invalid length + /// * [`Error::ExceedsMaxNumberOfVersions`] - The schema name reached max number of versions + /// + #[pallet::call_index(7)] + #[pallet::weight( + match schema_name { + Some(_) => T::WeightInfo::create_schema_v3(model.len() as u32 + settings.len() as u32), + None => T::WeightInfo::create_schema_v2(model.len() as u32 + settings.len() as u32) + } + )] + pub fn create_schema_v3( + origin: OriginFor, + model: BoundedVec, + model_type: ModelType, + payload_location: PayloadLocation, + settings: BoundedVec, + schema_name: Option, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let (schema_id, schema_name) = Self::create_schema_for( + model, + model_type, + payload_location, + settings, + schema_name, + )?; + + Self::deposit_event(Event::SchemaCreated { key: sender, schema_id }); + if let Some(inner_name) = schema_name { + Self::deposit_event(Event::SchemaNameCreated { + schema_id, + name: inner_name.get_combined_name(), + }); + } + Ok(()) + } + + /// Propose to create a schema name. 
Creates a proposal for council approval to create a schema name + /// * [`Error::LessThanMinSchemaModelBytes`] - The schema's length is less than the minimum schema length + /// * [`Error::ExceedsMaxSchemaModelBytes`] - The schema's length is greater than the maximum schema length + /// * [`Error::InvalidSchema`] - Schema is malformed in some way + /// * [`Error::InvalidSchemaNameEncoding`] - The schema name has invalid encoding + /// * [`Error::InvalidSchemaNameCharacters`] - The schema name has invalid characters + /// * [`Error::InvalidSchemaNameStructure`] - The schema name has invalid structure + /// * [`Error::InvalidSchemaNameLength`] - The schema name has invalid length + /// * [`Error::InvalidSchemaNamespaceLength`] - The schema namespace has invalid length + /// * [`Error::InvalidSchemaDescriptorLength`] - The schema descriptor has invalid length + /// * [`Error::ExceedsMaxNumberOfVersions`] - The schema name reached max number of versions + /// * [`Error::SchemaIdDoesNotExist`] - The schema id does not exist + /// * [`Error::SchemaIdAlreadyHasName`] - The schema id already has a name + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::propose_to_create_schema_name())] + pub fn propose_to_create_schema_name( + origin: OriginFor, + schema_id: SchemaId, + schema_name: SchemaNamePayload, + ) -> DispatchResult { + let proposer = ensure_signed(origin)?; + + let _ = Self::parse_and_verify_schema_name(schema_id, &schema_name)?; + + let proposal: Box = Box::new( + (Call::::create_schema_name_via_governance { schema_id, schema_name }).into(), + ); + T::ProposalProvider::propose_with_simple_majority(proposer, proposal)?; + Ok(()) + } + + /// Assigns a name to a schema without any name + /// + /// # Events + /// * [`Event::SchemaNameCreated`] + /// + /// # Errors + /// * [`Error::LessThanMinSchemaModelBytes`] - The schema's length is less than the minimum schema length + /// * [`Error::ExceedsMaxSchemaModelBytes`] - The schema's length is greater than the maximum schema length + /// * [`Error::InvalidSchema`] - Schema is malformed in some way + /// * [`Error::SchemaCountOverflow`] - The schema count has exceeded its bounds + /// * [`Error::InvalidSchemaNameEncoding`] - The schema name has invalid encoding + /// * [`Error::InvalidSchemaNameCharacters`] - The schema name has invalid characters + /// * [`Error::InvalidSchemaNameStructure`] - The schema name has invalid structure + /// * [`Error::InvalidSchemaNameLength`] - The schema name has invalid length + /// * [`Error::InvalidSchemaNamespaceLength`] - The schema namespace has invalid length + /// * [`Error::InvalidSchemaDescriptorLength`] - The schema descriptor has invalid length + /// * [`Error::ExceedsMaxNumberOfVersions`] - The schema name reached max number of versions + /// * [`Error::SchemaIdDoesNotExist`] - The schema id does not exist + /// * [`Error::SchemaIdAlreadyHasName`] - The schema id already has a name + /// + #[pallet::call_index(9)] + #[pallet::weight(T::WeightInfo::create_schema_name_via_governance())] + pub fn create_schema_name_via_governance( + origin: OriginFor, + schema_id: SchemaId, + schema_name: SchemaNamePayload, + ) -> DispatchResult { + T::CreateSchemaViaGovernanceOrigin::ensure_origin(origin)?; + + let parsed_name = Self::parse_and_verify_schema_name(schema_id, &schema_name)?; + SchemaNameToIds::::try_mutate( + &parsed_name.namespace, + &parsed_name.descriptor, + |schema_version_id| -> DispatchResult { + schema_version_id.add::(schema_id)?; + + Self::deposit_event(Event::SchemaNameCreated { + 
schema_id, + name: parsed_name.get_combined_name(), + }); + Ok(()) + }, + ) + } } impl Pallet { @@ -402,18 +693,34 @@ pub mod pallet { model_type: ModelType, payload_location: PayloadLocation, settings: BoundedVec, + schema_name_option: Option, ) -> Result { let schema_id = Self::get_next_schema_id()?; + let has_name = schema_name_option.is_some(); let mut set_settings = SchemaSettings::all_disabled(); if !settings.is_empty() { for i in settings.into_inner() { set_settings.set(i); } } - let schema_info = SchemaInfo { model_type, payload_location, settings: set_settings }; + + if let Some(schema_name) = schema_name_option { + SchemaNameToIds::::try_mutate( + schema_name.namespace, + schema_name.descriptor, + |schema_version_id| -> Result<(), DispatchError> { + schema_version_id.add::(schema_id)?; + Ok(()) + }, + )?; + }; + + let schema_info = + SchemaInfo { model_type, payload_location, settings: set_settings, has_name }; >::set(schema_id); >::insert(schema_id, schema_info); >::insert(schema_id, model); + Ok(schema_id) } @@ -508,7 +815,8 @@ pub mod pallet { model_type: ModelType, payload_location: PayloadLocation, settings: BoundedVec, - ) -> Result { + optional_schema_name: Option, + ) -> Result<(SchemaId, Option), DispatchError> { Self::ensure_valid_model(&model_type, &model)?; ensure!( model.len() >= T::MinSchemaModelSizeBytes::get() as usize, @@ -524,7 +832,52 @@ pub mod pallet { payload_location == PayloadLocation::Itemized, Error::::InvalidSetting ); - Self::add_schema(model, model_type, payload_location, settings) + let schema_name = match optional_schema_name { + None => None, + Some(name_payload) => { + let parsed_name = SchemaName::try_parse::(name_payload, true)?; + Some(parsed_name) + }, + }; + let schema_id = Self::add_schema( + model, + model_type, + payload_location, + settings, + schema_name.clone(), + )?; + Ok((schema_id, schema_name)) + } + + /// a method to return all versions of a schema name with their schemaIds + /// Warning: Must only get called from RPC, since the number of DB accesses is not deterministic + pub fn get_schema_versions(schema_name: Vec) -> Option> { + let bounded_name = BoundedVec::try_from(schema_name).ok()?; + let parsed_name = SchemaName::try_parse::(bounded_name, false).ok()?; + let versions: Vec<_> = match parsed_name.descriptor_exists() { + true => SchemaNameToIds::::get(&parsed_name.namespace, &parsed_name.descriptor) + .convert_to_response(&parsed_name), + false => SchemaNameToIds::::iter_prefix(&parsed_name.namespace) + .flat_map(|(descriptor, val)| { + val.convert_to_response(&parsed_name.new_with_descriptor(descriptor)) + }) + .collect(), + }; + Some(versions) + } + + /// Parses the schema name and makes sure the schema does not have a name + fn parse_and_verify_schema_name( + schema_id: SchemaId, + schema_name: &SchemaNamePayload, + ) -> Result { + let schema_option = Self::get_schema_info(schema_id); + ensure!(schema_option.is_some(), Error::::SchemaIdDoesNotExist); + if let Some(info) = schema_option { + ensure!(!info.has_name, Error::::SchemaIdAlreadyHasName); + } + let parsed_name = SchemaName::try_parse::(schema_name.clone(), true)?; + Ok(parsed_name) } } } @@ -546,7 +899,7 @@ impl SchemaBenchmarkHelper for Pallet { let model: BoundedVec = model.try_into().unwrap(); Self::ensure_valid_model(&model_type, &model)?; - Self::add_schema(model, model_type, payload_location, BoundedVec::default())?; + Self::add_schema(model, model_type, payload_location, BoundedVec::default(), None)?; Ok(()) } } diff --git 
a/pallets/schemas/src/migration/mod.rs b/pallets/schemas/src/migration/mod.rs index c34354a101..4c2cc2564a 100644 --- a/pallets/schemas/src/migration/mod.rs +++ b/pallets/schemas/src/migration/mod.rs @@ -1,2 +1,2 @@ -/// migrations to v2 -pub mod v2; +/// migrations to v3 +pub mod v3; diff --git a/pallets/schemas/src/migration/v3.rs b/pallets/schemas/src/migration/v3.rs new file mode 100644 index 0000000000..4c45d786c8 --- /dev/null +++ b/pallets/schemas/src/migration/v3.rs @@ -0,0 +1,179 @@ +#[cfg(feature = "try-runtime")] +use crate::types::SCHEMA_STORAGE_VERSION; +use crate::{ + migration::v3::old::OldSchemaInfo, + pallet::{SchemaInfos, SchemaNameToIds}, + Config, Pallet, SchemaId, SchemaInfo, SchemaName, LOG_TARGET, +}; +use frame_support::{pallet_prelude::*, traits::OnRuntimeUpgrade, weights::Weight}; +use log; +use sp_runtime::Saturating; +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; + +/// get known schema names for mainnet +#[cfg(feature = "frequency")] +pub fn get_known_schemas() -> BTreeMap> { + BTreeMap::from([ + (1, b"dsnp.tombstone".to_vec()), + (2, b"dsnp.broadcast".to_vec()), + (3, b"dsnp.reply".to_vec()), + (4, b"dsnp.reaction".to_vec()), + (5, b"dsnp.update".to_vec()), + (6, b"dsnp.profile".to_vec()), + (7, b"dsnp.public-key".to_vec()), + (8, b"dsnp.public-follows".to_vec()), + (9, b"dsnp.private-follows".to_vec()), + (10, b"dsnp.private-connections".to_vec()), + ]) +} +/// get known schema names for rococo +#[cfg(not(feature = "frequency"))] +pub fn get_known_schemas() -> BTreeMap> { + BTreeMap::from([ + (1, b"dsnp.tombstone".to_vec()), + (2, b"dsnp.broadcast".to_vec()), + (3, b"dsnp.reply".to_vec()), + (4, b"dsnp.reaction".to_vec()), + (5, b"dsnp.profile".to_vec()), + (6, b"dsnp.update".to_vec()), + (18, b"dsnp.public-key".to_vec()), + (13, b"dsnp.public-follows".to_vec()), + (14, b"dsnp.private-follows".to_vec()), + (15, b"dsnp.private-connections".to_vec()), + ]) +} + +/// old module storages +pub mod old { + use super::*; + use common_primitives::schema::{ModelType, PayloadLocation, SchemaSettings}; + + #[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)] + /// A structure defining a Schema information (excluding the payload) + pub struct OldSchemaInfo { + /// The type of model (AvroBinary, Parquet, etc.) 
+ pub model_type: ModelType, + /// The payload location + pub payload_location: PayloadLocation, + /// additional control settings for the schema + pub settings: SchemaSettings, + } +} + +/// migration to v2 implementation +pub struct MigrateToV3(PhantomData); + +impl OnRuntimeUpgrade for MigrateToV3 { + fn on_runtime_upgrade() -> Weight { + migrate_to_v3::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + log::info!(target: LOG_TARGET, "Running pre_upgrade..."); + let count = SchemaInfos::::iter().count() as u32; + log::info!(target: LOG_TARGET, "Finish pre_upgrade for {:?}", count); + Ok(count.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { + log::info!(target: LOG_TARGET, "Running post_upgrade..."); + let onchain_version = Pallet::::on_chain_storage_version(); + assert_eq!(onchain_version, SCHEMA_STORAGE_VERSION); + log::info!(target: LOG_TARGET, "Finished post_upgrade"); + Ok(()) + } +} + +/// migrating to v3 +pub fn migrate_to_v3() -> Weight { + log::info!(target: LOG_TARGET, "Running storage migration..."); + let onchain_version = Pallet::::on_chain_storage_version(); + let current_version = Pallet::::current_storage_version(); + log::info!(target: LOG_TARGET, "onchain_version= {:?}, current_version={:?}", onchain_version, current_version); + let each_layer_access: u64 = 33 * 16; + + if onchain_version < 3 { + let known_schemas = get_known_schemas(); + let mut reads = 1u64; + let mut writes = 0u64; + let mut bytes = 0u64; + + SchemaInfos::::translate_values(|old: OldSchemaInfo| -> Option { + reads.saturating_inc(); + writes.saturating_inc(); + bytes = bytes.saturating_add(old.encode().len() as u64); + bytes = bytes.saturating_add(each_layer_access * 3); // three layers in merkle tree + + Some(SchemaInfo { + model_type: old.model_type, + payload_location: old.payload_location, + settings: old.settings, + has_name: false, + }) + }); + + log::error!(target: LOG_TARGET, "Finished translating {:?} SchemaInfos!", writes); + + for (schema_id, schema_name) in known_schemas.iter() { + reads.saturating_inc(); + + if let Some(mut schema) = SchemaInfos::::get(&schema_id) { + bytes = bytes.saturating_add(schema.encode().len() as u64); + bytes = bytes.saturating_add(each_layer_access * 3); // three layers in merkle tree + + match BoundedVec::try_from(schema_name.clone()) { + Ok(bounded_name) => match SchemaName::try_parse::(bounded_name, true) { + Ok(parsed_name) => { + let _ = SchemaNameToIds::::try_mutate( + parsed_name.namespace, + parsed_name.descriptor, + |schema_version_id| -> Result<(), DispatchError> { + bytes = bytes + .saturating_add(schema_version_id.encode().len() as u64); + + let _ = schema_version_id.add::(*schema_id); + + // set schema as having a name + schema.has_name = true; + SchemaInfos::::set(&schema_id, Some(schema)); + writes.saturating_inc(); + + Ok(()) + }, + ); + reads.saturating_inc(); + writes.saturating_inc(); + }, + Err(_) => { + log::error!(target: LOG_TARGET, "Not able to parse the name {:?}", schema_name); + }, + }, + Err(_) => { + log::error!(target: LOG_TARGET, "Was not able to get bounded vec {:?}", schema_name); + }, + } + } + } + + // Set storage version to `3`. 
+ StorageVersion::new(3).put::>(); + writes.saturating_inc(); + + log::info!(target: LOG_TARGET, "Storage migrated to version 3 read={:?}, write={:?}, bytes={:?}", reads, writes, bytes); + let weights = T::DbWeight::get().reads_writes(reads, writes).add_proof_size(bytes); + log::info!(target: LOG_TARGET, "Migration Calculated weights={:?}",weights); + weights + } else { + log::info!( + target: LOG_TARGET, + "Migration did not execute. This probably should be removed onchain:{:?}, current:{:?}", + onchain_version, + current_version + ); + T::DbWeight::get().reads(1) + } +} diff --git a/pallets/schemas/src/rpc/src/lib.rs b/pallets/schemas/src/rpc/src/lib.rs index 241caf2aa9..7901239181 100644 --- a/pallets/schemas/src/rpc/src/lib.rs +++ b/pallets/schemas/src/rpc/src/lib.rs @@ -55,6 +55,10 @@ pub trait SchemasApi { /// validates a schema model and returns `true` if the model is correct. #[method(name = "schemas_checkSchemaValidity")] fn check_schema_validity(&self, model: Vec, at: Option) -> RpcResult; + + /// returns an array of schema versions + #[method(name = "schemas_getVersions")] + fn get_versions(&self, schema_name: String) -> RpcResult>>; } /// The client handler for the API used by Frequency Service RPC with `jsonrpsee` @@ -99,4 +103,11 @@ where let schema_api_result = api.get_by_schema_id(at, schema_id); map_rpc_result(schema_api_result) } + + fn get_versions(&self, schema_name: String) -> RpcResult>> { + let api = self.client.runtime_api(); + let at = self.client.info().best_hash; + let schema_api_result = api.get_schema_versions_by_name(at, schema_name.into_bytes()); + map_rpc_result(schema_api_result) + } } diff --git a/pallets/schemas/src/rpc/src/tests/mod.rs b/pallets/schemas/src/rpc/src/tests/mod.rs index 4722062b8b..aba41017b6 100644 --- a/pallets/schemas/src/rpc/src/tests/mod.rs +++ b/pallets/schemas/src/rpc/src/tests/mod.rs @@ -2,12 +2,14 @@ mod rpc_mock; use super::*; use rpc_mock::*; +use std::string::ToString; use pallet_schemas_runtime_api::SchemasRuntimeApi; use std::sync::Arc; use substrate_test_runtime_client::runtime::Block; const SUCCESSFUL_SCHEMA_ID: u16 = 1; +const SUCCESSFUL_SCHEMA_NAME: &str = "namespace.descriptor"; sp_api::mock_impl_runtime_apis! { impl SchemasRuntimeApi for TestRuntimeApi { @@ -23,10 +25,33 @@ sp_api::mock_impl_runtime_apis! 
{ _ => None, } } + + fn get_schema_versions_by_name(schema_name: Vec) -> Option> { + let successful_name_bytes = SUCCESSFUL_SCHEMA_NAME.to_string().into_bytes(); + if successful_name_bytes == schema_name { + Some( + vec![ + SchemaVersionResponse { + schema_id: 1, + schema_version: 1, + schema_name: successful_name_bytes.clone() + }, + SchemaVersionResponse { + schema_id: 10, + schema_version: 2, + schema_name: successful_name_bytes.clone() + }, + ] + ) + } else { + None + } + } } } type SchemaResult = Result, jsonrpsee::core::Error>; +type VersionResult = Result>, jsonrpsee::core::Error>; #[tokio::test] async fn get_schema_with_non_existent_schema_id_should_return_none() { @@ -57,6 +82,18 @@ async fn get_schema_with_success() { assert_eq!(PayloadLocation::OnChain, response.payload_location); } +#[tokio::test] +async fn get_schema_versions_with_success() { + let client = Arc::new(TestApi {}); + let api = SchemasHandler::new(client); + + let result: VersionResult = api.get_versions(SUCCESSFUL_SCHEMA_NAME.to_string()); + + assert_eq!(true, result.is_ok()); + let response = result.unwrap().unwrap(); + assert_eq!(response.len(), 2); +} + #[tokio::test] async fn check_schema_validity_success() { let client = Arc::new(TestApi {}); diff --git a/pallets/schemas/src/runtime-api/src/lib.rs b/pallets/schemas/src/runtime-api/src/lib.rs index 71e114cec1..af0a175199 100644 --- a/pallets/schemas/src/runtime-api/src/lib.rs +++ b/pallets/schemas/src/runtime-api/src/lib.rs @@ -18,6 +18,7 @@ //! - Runtime interfaces for end users beyond just State Queries use common_primitives::schema::*; +use sp_api::vec::Vec; sp_api::decl_runtime_apis! { @@ -25,12 +26,14 @@ sp_api::decl_runtime_apis! { /// - MUST be incremented if anything changes /// - Also update in js/api-augment /// - See: https://paritytech.github.io/polkadot/doc/polkadot_primitives/runtime_api/index.html - #[api_version(1)] + #[api_version(2)] /// Runtime API definition for [Schemas](../pallet_schemas/index.html) pub trait SchemasRuntimeApi { /// Fetch the schema by id fn get_by_schema_id(schema_id: SchemaId) -> Option; + /// Fetch the schema versions by name + fn get_schema_versions_by_name(schema_name: Vec) -> Option>; } } diff --git a/pallets/schemas/src/tests/deprecated_tests.rs b/pallets/schemas/src/tests/deprecated_tests.rs new file mode 100644 index 0000000000..ff460d00ae --- /dev/null +++ b/pallets/schemas/src/tests/deprecated_tests.rs @@ -0,0 +1,470 @@ +use super::mock::*; +use crate::{Error, Event as AnnouncementEvent}; +use common_primitives::{ + node::AccountId, + schema::{ModelType, PayloadLocation, SchemaId, SchemaSetting}, +}; +use frame_support::{ + assert_noop, assert_ok, + traits::{ChangeMembers, Hash}, + weights::Weight, + BoundedVec, +}; +use parity_scale_codec::Encode; +use serial_test::serial; + +#[test] +#[allow(deprecated)] +fn require_valid_schema_size_errors() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let test_cases: [TestCase<(Error, u8)>; 2] = [ + TestCase { + input: r#"{"a":1}"#, + expected: (Error::::LessThanMinSchemaModelBytes, 3), + }, + TestCase { + input: r#"{"id": "long", "title": "I am a very very very long schema", "properties": "just way too long to live a long life", "description": "Just a never ending stream of bytes that goes on for a minute too long"}"#, + expected: (Error::::ExceedsMaxSchemaModelBytes, 2), + }, + ]; + for tc in test_cases { + assert_noop!( + SchemasPallet::create_schema(RuntimeOrigin::signed(test_public(1)), create_bounded_schema_vec(tc.input), 
ModelType::AvroBinary, PayloadLocation::OnChain), + tc.expected.0); + } + }) +} + +#[test] +#[allow(deprecated)] +fn create_schema_v2_requires_valid_schema_size() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let test_cases: [TestCase<(Error, u8)>; 2] = [ + TestCase { + input: r#"{"a":1}"#, + expected: (Error::::LessThanMinSchemaModelBytes, 3), + }, + TestCase { + input: r#"{"id": "long", "title": "I am a very very very long schema", "properties": "just way too long to live a long life", "description": "Just a never ending stream of bytes that goes on for a minute too long"}"#, + expected: (Error::::ExceedsMaxSchemaModelBytes, 2), + }, + ]; + for tc in test_cases { + assert_noop!( + SchemasPallet::create_schema_v2(RuntimeOrigin::signed(test_public(1)), create_bounded_schema_vec(tc.input), ModelType::AvroBinary, PayloadLocation::OnChain, BoundedVec::default()), + tc.expected.0); + } + }) +} + +#[test] +#[allow(deprecated)] +fn create_schema_via_governance_happy_path() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let sender: AccountId = test_public(5); + assert_ok!(SchemasPallet::create_schema_via_governance( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender, + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + )); + }) +} + +/// Test that a request to be a provider, makes the MSA a provider after the council approves it. +#[test] +#[allow(deprecated)] +fn propose_to_create_schema_happy_path() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + + let test_model = r#"{"foo": "bar", "bar": "buzz"}"#; + let serialized_fields = Vec::from(test_model.as_bytes()); + // Propose a new schema + _ = SchemasPallet::propose_to_create_schema( + test_origin_signed(5), + create_bounded_schema_vec(test_model), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + ); + + // Find the Proposed event and get it's hash and index so it can be voted on + let proposed_events: Vec<(u32, Hash)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Proposed { + account: _, + proposal_index, + proposal_hash, + threshold: _, + }) => Some((proposal_index, proposal_hash)), + _ => None, + }) + .collect(); + + assert_eq!(proposed_events.len(), 1); + + let proposal_index = proposed_events[0].0; + let proposal_hash = proposed_events[0].1; + let proposal = Council::proposal_of(proposal_hash).unwrap(); + let proposal_len: u32 = proposal.encoded_size() as u32; + + // Set up the council members + let council_member_1 = test_public(1); // Use ALICE as a council member + let council_member_2 = test_public(2); // Use BOB as a council member + let council_member_3 = test_public(3); // Use CHARLIE as a council member + + let incoming = vec![]; + let outgoing = vec![]; + Council::change_members( + &incoming, + &outgoing, + vec![council_member_1.clone(), council_member_2.clone(), council_member_3.clone()], + ); + + // Council member #1 votes AYE on the proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_1.clone()), + proposal_hash, + proposal_index, + true + )); + // Council member #2 votes AYE on the proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_2.clone()), + proposal_hash, + proposal_index, + true + )); + // Council member #3 votes NAY on the proposal + assert_ok!(Council::vote( + 
RuntimeOrigin::signed(council_member_3.clone()), + proposal_hash, + proposal_index, + false + )); + + // Find the Voted event and check if it passed + let voted_events: Vec<(bool, u32, u32)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Voted { + account: _, + proposal_hash: _, + voted, + yes, + no, + }) => Some((voted, yes, no)), + _ => None, + }) + .collect(); + + assert_eq!(voted_events.len(), 3); + assert_eq!(voted_events[1].1, 2); // There should be two AYE (out of three) votes to pass + + // Close the voting + assert_ok!(Council::close( + RuntimeOrigin::signed(test_public(5)), + proposal_hash, + proposal_index, + Weight::MAX, + proposal_len + )); + + // Find the Closed event and check if it passed + let closed_events: Vec<(u32, u32)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Closed { + proposal_hash: _, + yes, + no, + }) => Some((yes, no)), + _ => None, + }) + .collect(); + + assert_eq!(closed_events.len(), 1); + assert_eq!(closed_events[0].0, 2); // There should be two YES votes to pass + + // Find the SchemaCreated event and check if it passed + let schema_events: Vec = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::SchemasPallet(AnnouncementEvent::SchemaCreated { + key: _, + schema_id, + }) => Some(schema_id), + _ => None, + }) + .collect(); + + // Confirm that the schema was created + assert_eq!(schema_events.len(), 1); + + let last_schema_id = schema_events[0]; + let created_schema = SchemasPallet::get_schema_by_id(last_schema_id); + assert_eq!(created_schema.as_ref().is_some(), true); + assert_eq!(created_schema.as_ref().unwrap().clone().model, serialized_fields); + }) +} + +#[allow(deprecated)] +#[test] +fn create_schema_happy_path() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + assert_ok!(SchemasPallet::create_schema( + RuntimeOrigin::signed(sender), + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + )); + }) +} + +#[test] +#[allow(deprecated)] +fn create_schema_v2_happy_path() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + assert_ok!(SchemasPallet::create_schema_v2( + RuntimeOrigin::signed(sender), + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default() + )); + }) +} + +#[allow(deprecated)] +#[test] +fn create_schema_unhappy_path() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + assert_noop!( + SchemasPallet::create_schema( + RuntimeOrigin::signed(sender), + // name key does not have a colon + create_bounded_schema_vec(r#"{"name", 54, "type": "none"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + ), + Error::::InvalidSchema + ); + }) +} + +#[allow(deprecated)] +#[test] +#[serial] +fn create_schema_id_deposits_events_and_increments_schema_id() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + let mut last_schema_id: SchemaId = 0; + for fields in [ + r#"{"Name": "Bond", "Code": "007"}"#, + r#"{"type": "num","minimum": -90,"maximum": 90}"#, + r#"{"latitude": 48.858093,"longitude": 2.294694}"#, + ] { + let expected_schema_id = last_schema_id + 1; + 
assert_ok!(SchemasPallet::create_schema( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(fields), + ModelType::AvroBinary, + PayloadLocation::OnChain, + )); + System::assert_last_event( + AnnouncementEvent::SchemaCreated { + key: sender.clone(), + schema_id: expected_schema_id, + } + .into(), + ); + last_schema_id = expected_schema_id; + } + assert_ok!(SchemasPallet::create_schema( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(r#"{"account":3050}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + )); + }) +} + +#[test] +#[allow(deprecated)] +#[serial] +fn create_schema_v2_id_deposits_events_and_increments_schema_id() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + let mut last_schema_id: SchemaId = 0; + for fields in [ + r#"{"Name": "Bond", "Code": "007"}"#, + r#"{"type": "num","minimum": -90,"maximum": 90}"#, + r#"{"latitude": 48.858093,"longitude": 2.294694}"#, + ] { + let expected_schema_id = last_schema_id + 1; + assert_ok!(SchemasPallet::create_schema_v2( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(fields), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default() + )); + System::assert_last_event( + AnnouncementEvent::SchemaCreated { + key: sender.clone(), + schema_id: expected_schema_id, + } + .into(), + ); + last_schema_id = expected_schema_id; + } + assert_ok!(SchemasPallet::create_schema_v2( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(r#"{"account":3050}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default() + )); + }) +} + +#[allow(deprecated)] +#[test] +fn get_existing_schema_by_id_should_return_schema() { + new_test_ext().execute_with(|| { + let sender: AccountId = test_public(1); + sudo_set_max_schema_size(); + // arrange + let test_str = r#"{"foo": "bar", "bar": "buzz"}"#; + let serialized_fields = Vec::from(test_str.as_bytes()); + assert_ok!(SchemasPallet::create_schema( + RuntimeOrigin::signed(sender), + create_bounded_schema_vec(test_str), + ModelType::AvroBinary, + PayloadLocation::OnChain, + )); + + // act + let res = SchemasPallet::get_schema_by_id(1); + + // assert + assert_eq!(res.as_ref().is_some(), true); + assert_eq!(res.as_ref().unwrap().clone().model, serialized_fields); + }) +} + +#[allow(deprecated)] +#[test] +fn get_existing_schema_by_id_should_return_schema_v2() { + new_test_ext().execute_with(|| { + let sender: AccountId = test_public(1); + sudo_set_max_schema_size(); + // arrange + let test_str = r#"{"foo": "bar", "bar": "buzz"}"#; + let serialized_fields = Vec::from(test_str.as_bytes()); + assert_ok!(SchemasPallet::create_schema_v2( + RuntimeOrigin::signed(sender), + create_bounded_schema_vec(test_str), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default() + )); + + // act + let res = SchemasPallet::get_schema_by_id(1); + + // assert + assert_eq!(res.as_ref().is_some(), true); + assert_eq!(res.as_ref().unwrap().clone().model, serialized_fields); + }) +} + +#[allow(deprecated)] +#[test] +fn create_schema_with_settings_should_work() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + + // arrange + let settings = vec![SchemaSetting::AppendOnly]; + let sender: AccountId = test_public(1); + + // act and assert + assert_ok!(SchemasPallet::create_schema_via_governance( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender, + create_bounded_schema_vec(r#"{"name":"John Doe"}"#), + 
ModelType::AvroBinary, + PayloadLocation::Itemized, + BoundedVec::try_from(settings.clone()).unwrap(), + )); + + // assert + let res = SchemasPallet::get_schema_by_id(1); + assert_eq!(res.unwrap().settings, settings); + }) +} + +#[allow(deprecated)] +#[test] +fn create_schema_with_append_only_setting_and_non_itemized_should_fail() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + + // arrange + let settings = vec![SchemaSetting::AppendOnly]; + let sender: AccountId = test_public(1); + // act and assert + assert_noop!( + SchemasPallet::create_schema_via_governance( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender.clone(), + create_bounded_schema_vec(r#"{"name":"John Doe"}"#), + ModelType::AvroBinary, + PayloadLocation::Paginated, + BoundedVec::try_from(settings.clone()).unwrap(), + ), + Error::::InvalidSetting + ); + + // act and assert + assert_noop!( + SchemasPallet::create_schema_via_governance( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender.clone(), + create_bounded_schema_vec(r#"{"name":"John Doe"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::try_from(settings.clone()).unwrap(), + ), + Error::::InvalidSetting + ); + + assert_noop!( + SchemasPallet::create_schema_via_governance( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender, + create_bounded_schema_vec(r#"{"name":"John Doe"}"#), + ModelType::AvroBinary, + PayloadLocation::IPFS, + BoundedVec::try_from(settings.clone()).unwrap(), + ), + Error::::InvalidSetting + ); + }) +} diff --git a/pallets/schemas/src/tests/migrations_tests.rs b/pallets/schemas/src/tests/migrations_tests.rs new file mode 100644 index 0000000000..71d2a75fbd --- /dev/null +++ b/pallets/schemas/src/tests/migrations_tests.rs @@ -0,0 +1,52 @@ +use crate::{ + migration::v3, + pallet::SchemaNameToIds, + tests::mock::{ + create_bounded_schema_vec, new_test_ext, sudo_set_max_schema_size, test_public, + RuntimeOrigin, SchemasPallet, Test, + }, + SchemaName, +}; +use common_primitives::{node::AccountId, schema::*}; +use frame_support::{ + assert_ok, pallet_prelude::StorageVersion, traits::GetStorageVersion, BoundedVec, +}; + +#[test] +fn schemas_migration_to_v3_should_work_as_expected() { + new_test_ext().execute_with(|| { + // Arrange + sudo_set_max_schema_size(); + let sender: AccountId = test_public(5); + let schemas = vec![r#"{"latitude": 48.858093,"longitude": 2.294694}"#; 20]; + for fields in schemas.iter() { + assert_ok!(SchemasPallet::create_schema_v3( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(fields), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + None, + )); + } + + // Act + let _ = v3::migrate_to_v3::(); + + // Assert + let current_version = SchemasPallet::current_storage_version(); + assert_eq!(current_version, StorageVersion::new(3)); + + let known_schemas = v3::get_known_schemas(); + let versions_count = SchemaNameToIds::::iter().count(); + assert_eq!(known_schemas.len(), versions_count); + + for (_, schema_name) in known_schemas.iter() { + let bounded_name = BoundedVec::try_from(schema_name.clone()).expect("should work"); + let parsed_name = + SchemaName::try_parse::(bounded_name, true).expect("should parse"); + let val = SchemaNameToIds::::get(&parsed_name.namespace, &parsed_name.descriptor); + assert_eq!(val.ids.len(), 1usize); + } + }); +} diff --git a/pallets/schemas/src/tests/mock.rs b/pallets/schemas/src/tests/mock.rs index 918da81343..ef0df1513d 100644 --- 
a/pallets/schemas/src/tests/mock.rs +++ b/pallets/schemas/src/tests/mock.rs @@ -1,6 +1,9 @@ use frame_support::{ + assert_ok, + dispatch::RawOrigin, traits::{ConstU16, ConstU32, EitherOfDiverse}, weights::{Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial}, + BoundedVec, }; use frame_system::EnsureRoot; use parity_scale_codec::MaxEncodedLen; @@ -170,3 +173,22 @@ pub fn test_public(n: u8) -> AccountId32 { pub fn test_origin_signed(n: u8) -> RuntimeOrigin { RuntimeOrigin::signed(test_public(n)) } + +/// creates a bounded vec schema +pub fn create_bounded_schema_vec( + from_string: &str, +) -> BoundedVec::SchemaModelMaxBytesBoundedVecLimit> { + let fields_vec = Vec::from(from_string.as_bytes()); + BoundedVec::try_from(fields_vec).unwrap() +} + +/// sets max schema size +pub fn sudo_set_max_schema_size() { + assert_ok!(SchemasPallet::set_max_schema_model_bytes(RawOrigin::Root.into(), 70)); +} + +/// struct which encapsulates a test case and expected result +pub struct TestCase { + pub input: &'static str, + pub expected: T, +} diff --git a/pallets/schemas/src/tests/mod.rs b/pallets/schemas/src/tests/mod.rs index baaa2f1243..55e75503fb 100644 --- a/pallets/schemas/src/tests/mod.rs +++ b/pallets/schemas/src/tests/mod.rs @@ -1,3 +1,5 @@ +mod deprecated_tests; +mod migrations_tests; pub mod mock; mod other_tests; mod serde_tests; diff --git a/pallets/schemas/src/tests/other_tests.rs b/pallets/schemas/src/tests/other_tests.rs index 47c259eb13..faa3aa9aac 100644 --- a/pallets/schemas/src/tests/other_tests.rs +++ b/pallets/schemas/src/tests/other_tests.rs @@ -1,14 +1,3 @@ -use frame_support::{ - assert_noop, assert_ok, - dispatch::RawOrigin, - pallet_prelude::GetStorageVersion, - traits::{ChangeMembers, Hash, StorageVersion}, - BoundedVec, -}; -use serial_test::serial; -use sp_core::{crypto::AccountId32, Encode}; -use sp_weights::Weight; - use common_primitives::{ node::AccountId, parquet::{ @@ -18,289 +7,28 @@ use common_primitives::{ types::ParquetType, ParquetModel, }, - schema::{ModelType, PayloadLocation, SchemaId, SchemaSetting, SchemaSettings}, + schema::{ + ModelType, PayloadLocation, SchemaId, SchemaSetting, SchemaVersion, SchemaVersionResponse, + }, }; +use frame_support::{ + assert_noop, assert_ok, + dispatch::RawOrigin, + traits::{ChangeMembers, Hash}, + weights::Weight, + BoundedVec, +}; +use parity_scale_codec::Encode; +use serial_test::serial; use sp_runtime::DispatchError::BadOrigin; use crate::{ - migration::v2, - pallet::{SchemaInfos, SchemaPayloads}, - Config, Error, Event as AnnouncementEvent, + Error, Event as AnnouncementEvent, SchemaDescriptor, SchemaName, SchemaNamePayload, + SchemaNamespace, SchemaVersionId, MAX_NUMBER_OF_VERSIONS, }; use super::mock::*; -fn create_bounded_schema_vec( - from_string: &str, -) -> BoundedVec::SchemaModelMaxBytesBoundedVecLimit> { - let fields_vec = Vec::from(from_string.as_bytes()); - BoundedVec::try_from(fields_vec).unwrap() -} - -fn sudo_set_max_schema_size() { - assert_ok!(SchemasPallet::set_max_schema_model_bytes(RawOrigin::Root.into(), 70)); -} - -pub mod test {} - -struct TestCase { - schema: &'static str, - expected: T, -} - -/// Create and return a simple test AccountId32 constructed with the desired integer. 
-pub fn test_public(n: u8) -> AccountId32 { - AccountId32::new([n; 32]) -} - -#[test] -#[allow(deprecated)] -fn require_valid_schema_size_errors() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let test_cases: [TestCase<(Error, u8)>; 2] = [ - TestCase { - schema: r#"{"a":1}"#, - expected: (Error::::LessThanMinSchemaModelBytes, 3), - }, - TestCase { - schema: r#"{"id": "long", "title": "I am a very very very long schema", "properties": "just way too long to live a long life", "description": "Just a never ending stream of bytes that goes on for a minute too long"}"#, - expected: (Error::::ExceedsMaxSchemaModelBytes, 2), - }, - ]; - for tc in test_cases { - assert_noop!( - SchemasPallet::create_schema(RuntimeOrigin::signed(test_public(1)), create_bounded_schema_vec(tc.schema), ModelType::AvroBinary, PayloadLocation::OnChain), - tc.expected.0); - } - }) -} - -#[test] -fn create_schema_v2_requires_valid_schema_size() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let test_cases: [TestCase<(Error, u8)>; 2] = [ - TestCase { - schema: r#"{"a":1}"#, - expected: (Error::::LessThanMinSchemaModelBytes, 3), - }, - TestCase { - schema: r#"{"id": "long", "title": "I am a very very very long schema", "properties": "just way too long to live a long life", "description": "Just a never ending stream of bytes that goes on for a minute too long"}"#, - expected: (Error::::ExceedsMaxSchemaModelBytes, 2), - }, - ]; - for tc in test_cases { - assert_noop!( - SchemasPallet::create_schema_v2(RuntimeOrigin::signed(test_public(1)), create_bounded_schema_vec(tc.schema), ModelType::AvroBinary, PayloadLocation::OnChain, BoundedVec::default()), - tc.expected.0); - } - }) -} - -#[test] -fn create_schema_via_governance_happy_path() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let sender: AccountId = test_public(5); - assert_ok!(SchemasPallet::create_schema_via_governance( - RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), - sender, - create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::default(), - )); - }) -} - -/// Test that a request to be a provider, makes the MSA a provider after the council approves it. 
-#[test] -fn propose_to_create_schema_happy_path() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - - let test_model = r#"{"foo": "bar", "bar": "buzz"}"#; - let serialized_fields = Vec::from(test_model.as_bytes()); - // Propose a new schema - _ = SchemasPallet::propose_to_create_schema( - test_origin_signed(5), - create_bounded_schema_vec(test_model), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::default(), - ); - - // Find the Proposed event and get it's hash and index so it can be voted on - let proposed_events: Vec<(u32, Hash)> = System::events() - .iter() - .filter_map(|event| match event.event { - RuntimeEvent::Council(pallet_collective::Event::Proposed { - account: _, - proposal_index, - proposal_hash, - threshold: _, - }) => Some((proposal_index, proposal_hash)), - _ => None, - }) - .collect(); - - assert_eq!(proposed_events.len(), 1); - - let proposal_index = proposed_events[0].0; - let proposal_hash = proposed_events[0].1; - let proposal = Council::proposal_of(proposal_hash).unwrap(); - let proposal_len: u32 = proposal.encoded_size() as u32; - - // Set up the council members - let council_member_1 = test_public(1); // Use ALICE as a council member - let council_member_2 = test_public(2); // Use BOB as a council member - let council_member_3 = test_public(3); // Use CHARLIE as a council member - - let incoming = vec![]; - let outgoing = vec![]; - Council::change_members( - &incoming, - &outgoing, - vec![council_member_1.clone(), council_member_2.clone(), council_member_3.clone()], - ); - - // Council member #1 votes AYE on the proposal - assert_ok!(Council::vote( - RuntimeOrigin::signed(council_member_1.clone()), - proposal_hash, - proposal_index, - true - )); - // Council member #2 votes AYE on the proposal - assert_ok!(Council::vote( - RuntimeOrigin::signed(council_member_2.clone()), - proposal_hash, - proposal_index, - true - )); - // Council member #3 votes NAY on the proposal - assert_ok!(Council::vote( - RuntimeOrigin::signed(council_member_3.clone()), - proposal_hash, - proposal_index, - false - )); - - // Find the Voted event and check if it passed - let voted_events: Vec<(bool, u32, u32)> = System::events() - .iter() - .filter_map(|event| match event.event { - RuntimeEvent::Council(pallet_collective::Event::Voted { - account: _, - proposal_hash: _, - voted, - yes, - no, - }) => Some((voted, yes, no)), - _ => None, - }) - .collect(); - - assert_eq!(voted_events.len(), 3); - assert_eq!(voted_events[1].1, 2); // There should be two AYE (out of three) votes to pass - - // Close the voting - assert_ok!(Council::close( - RuntimeOrigin::signed(test_public(5)), - proposal_hash, - proposal_index, - Weight::MAX, - proposal_len - )); - - // Find the Closed event and check if it passed - let closed_events: Vec<(u32, u32)> = System::events() - .iter() - .filter_map(|event| match event.event { - RuntimeEvent::Council(pallet_collective::Event::Closed { - proposal_hash: _, - yes, - no, - }) => Some((yes, no)), - _ => None, - }) - .collect(); - - assert_eq!(closed_events.len(), 1); - assert_eq!(closed_events[0].0, 2); // There should be two YES votes to pass - - // Find the SchemaCreated event and check if it passed - let schema_events: Vec = System::events() - .iter() - .filter_map(|event| match event.event { - RuntimeEvent::SchemasPallet(AnnouncementEvent::SchemaCreated { - key: _, - schema_id, - }) => Some(schema_id), - _ => None, - }) - .collect(); - - // Confirm that the schema was created - assert_eq!(schema_events.len(), 1); - - let 
last_schema_id = schema_events[0]; - let created_schema = SchemasPallet::get_schema_by_id(last_schema_id); - assert_eq!(created_schema.as_ref().is_some(), true); - assert_eq!(created_schema.as_ref().unwrap().clone().model, serialized_fields); - }) -} - -#[allow(deprecated)] -#[test] -fn create_schema_happy_path() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let sender: AccountId = test_public(1); - assert_ok!(SchemasPallet::create_schema( - RuntimeOrigin::signed(sender), - create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), - ModelType::AvroBinary, - PayloadLocation::OnChain, - )); - }) -} - -#[test] -fn create_schema_v2_happy_path() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let sender: AccountId = test_public(1); - assert_ok!(SchemasPallet::create_schema_v2( - RuntimeOrigin::signed(sender), - create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::default() - )); - }) -} - -#[allow(deprecated)] -#[test] -fn create_schema_unhappy_path() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let sender: AccountId = test_public(1); - assert_noop!( - SchemasPallet::create_schema( - RuntimeOrigin::signed(sender), - // name key does not have a colon - create_bounded_schema_vec(r#"{"name", 54, "type": "none"}"#), - ModelType::AvroBinary, - PayloadLocation::OnChain, - ), - Error::::InvalidSchema - ); - }) -} - #[test] fn set_max_schema_size_works_if_root() { new_test_ext().execute_with(|| { @@ -336,135 +64,8 @@ fn set_max_schema_size_fails_if_larger_than_bound() { }) } -#[allow(deprecated)] #[test] -#[serial] -fn create_schema_id_deposits_events_and_increments_schema_id() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let sender: AccountId = test_public(1); - let mut last_schema_id: SchemaId = 0; - for fields in [ - r#"{"Name": "Bond", "Code": "007"}"#, - r#"{"type": "num","minimum": -90,"maximum": 90}"#, - r#"{"latitude": 48.858093,"longitude": 2.294694}"#, - ] { - let expected_schema_id = last_schema_id + 1; - assert_ok!(SchemasPallet::create_schema( - RuntimeOrigin::signed(sender.clone()), - create_bounded_schema_vec(fields), - ModelType::AvroBinary, - PayloadLocation::OnChain, - )); - System::assert_last_event( - AnnouncementEvent::SchemaCreated { - key: sender.clone(), - schema_id: expected_schema_id, - } - .into(), - ); - last_schema_id = expected_schema_id; - } - assert_ok!(SchemasPallet::create_schema( - RuntimeOrigin::signed(sender.clone()), - create_bounded_schema_vec(r#"{"account":3050}"#), - ModelType::AvroBinary, - PayloadLocation::OnChain, - )); - }) -} - -#[test] -#[serial] -fn create_schema_v2_id_deposits_events_and_increments_schema_id() { - new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); - let sender: AccountId = test_public(1); - let mut last_schema_id: SchemaId = 0; - for fields in [ - r#"{"Name": "Bond", "Code": "007"}"#, - r#"{"type": "num","minimum": -90,"maximum": 90}"#, - r#"{"latitude": 48.858093,"longitude": 2.294694}"#, - ] { - let expected_schema_id = last_schema_id + 1; - assert_ok!(SchemasPallet::create_schema_v2( - RuntimeOrigin::signed(sender.clone()), - create_bounded_schema_vec(fields), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::default() - )); - System::assert_last_event( - AnnouncementEvent::SchemaCreated { - key: sender.clone(), - schema_id: expected_schema_id, - } - .into(), - ); - last_schema_id = expected_schema_id; - } - 
assert_ok!(SchemasPallet::create_schema_v2( - RuntimeOrigin::signed(sender.clone()), - create_bounded_schema_vec(r#"{"account":3050}"#), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::default() - )); - }) -} - -#[allow(deprecated)] -#[test] -fn get_existing_schema_by_id_should_return_schema() { - new_test_ext().execute_with(|| { - let sender: AccountId = test_public(1); - sudo_set_max_schema_size(); - // arrange - let test_str = r#"{"foo": "bar", "bar": "buzz"}"#; - let serialized_fields = Vec::from(test_str.as_bytes()); - assert_ok!(SchemasPallet::create_schema( - RuntimeOrigin::signed(sender), - create_bounded_schema_vec(test_str), - ModelType::AvroBinary, - PayloadLocation::OnChain, - )); - - // act - let res = SchemasPallet::get_schema_by_id(1); - - // assert - assert_eq!(res.as_ref().is_some(), true); - assert_eq!(res.as_ref().unwrap().clone().model, serialized_fields); - }) -} - -#[test] -fn get_existing_schema_by_id_should_return_schema_v2() { - new_test_ext().execute_with(|| { - let sender: AccountId = test_public(1); - sudo_set_max_schema_size(); - // arrange - let test_str = r#"{"foo": "bar", "bar": "buzz"}"#; - let serialized_fields = Vec::from(test_str.as_bytes()); - assert_ok!(SchemasPallet::create_schema_v2( - RuntimeOrigin::signed(sender), - create_bounded_schema_vec(test_str), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::default() - )); - - // act - let res = SchemasPallet::get_schema_by_id(1); - - // assert - assert_eq!(res.as_ref().is_some(), true); - assert_eq!(res.as_ref().unwrap().clone().model, serialized_fields); - }) -} - -#[test] -fn get_non_existing_schema_by_id_should_return_none() { +fn get_non_existing_schema_by_id_should_return_none() { new_test_ext().execute_with(|| { // act let res = SchemasPallet::get_schema_by_id(1); @@ -632,145 +233,824 @@ fn dsnp_broadcast() { } #[test] -fn create_schema_with_settings_should_work() { +fn schema_name_try_parse_with_strict_invalid_names_should_fail() { new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); + let test_cases = [ + TestCase { + input: r#"¥¤¤.©©©"#, expected: Error::::InvalidSchemaNameEncoding + }, + TestCase { + input: r#"1asbd.hgd"#, + expected: Error::::InvalidSchemaNameCharacters, + }, + TestCase { + input: r#"asbd.hg1d"#, + expected: Error::::InvalidSchemaNameCharacters, + }, + TestCase { + input: r#"asbd.hg@d"#, + expected: Error::::InvalidSchemaNameCharacters, + }, + TestCase { input: r#"asbd"#, expected: Error::::InvalidSchemaNameStructure }, + TestCase { + input: r#"asbd.sdhks.shd"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"-asbdsdhks.shd"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"asbdsdhks-.shd"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"asbdsdhks.-shd"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"asbdsdhks.shd-"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"hjsagdhjsagjhgdshjagsadhjsaaaaa."#, + expected: Error::::InvalidSchemaNamespaceLength, + }, + TestCase { input: r#"a.sdhks"#, expected: Error::::InvalidSchemaNamespaceLength }, + TestCase { + input: r#"aa.sdhks"#, + expected: Error::::InvalidSchemaNamespaceLength, + }, + TestCase { input: r#".sdhks"#, expected: Error::::InvalidSchemaNamespaceLength }, + TestCase { input: r#"hjs."#, expected: Error::::InvalidSchemaDescriptorLength }, + ]; + for tc in test_cases { + let payload: SchemaNamePayload = + 
BoundedVec::try_from(tc.input.to_string().into_bytes()).expect("should convert"); + assert_noop!(SchemaName::try_parse::(payload, true), tc.expected); + } + }); +} - // arrange - let settings = vec![SchemaSetting::AppendOnly]; - let sender: AccountId = test_public(1); +#[test] +fn schema_name_try_parse_with_non_strict_invalid_names_should_fail() { + new_test_ext().execute_with(|| { + let test_cases = [ + TestCase { + input: r#"¥¤¤.©©©"#, expected: Error::::InvalidSchemaNameEncoding + }, + TestCase { input: r#"¥¤¤"#, expected: Error::::InvalidSchemaNameEncoding }, + TestCase { + input: r#"1asbd.hgd"#, + expected: Error::::InvalidSchemaNameCharacters, + }, + TestCase { input: r#"1asbd"#, expected: Error::::InvalidSchemaNameCharacters }, + TestCase { + input: r#"asbd.hg1d"#, + expected: Error::::InvalidSchemaNameCharacters, + }, + TestCase { + input: r#"asbd.hg@d"#, + expected: Error::::InvalidSchemaNameCharacters, + }, + TestCase { input: r#"hg@d"#, expected: Error::::InvalidSchemaNameCharacters }, + TestCase { + input: r#"asbd.sdhks.shd"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"-asbdsdhks.shd"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"asbdsdhks-.shd"#, + expected: Error::::InvalidSchemaNameStructure, + }, + TestCase { + input: r#"hjsagdhjsagjhgdshjagsadhjsaaaaa."#, + expected: Error::::InvalidSchemaNamespaceLength, + }, + TestCase { + input: r#"hjsagdhjsagjhgdshjagsadhjsaaaaa"#, + expected: Error::::InvalidSchemaNamespaceLength, + }, + TestCase { input: r#"a.sdhks"#, expected: Error::::InvalidSchemaNamespaceLength }, + TestCase { input: r#"a"#, expected: Error::::InvalidSchemaNamespaceLength }, + TestCase { + input: r#"aa.sdhks"#, + expected: Error::::InvalidSchemaNamespaceLength, + }, + TestCase { input: r#".sdhks"#, expected: Error::::InvalidSchemaNamespaceLength }, + TestCase { input: r#"aa"#, expected: Error::::InvalidSchemaNamespaceLength }, + TestCase { input: r#"hjs."#, expected: Error::::InvalidSchemaDescriptorLength }, + ]; + for tc in test_cases { + let payload: SchemaNamePayload = + BoundedVec::try_from(tc.input.to_string().into_bytes()).expect("should convert"); + assert_noop!(SchemaName::try_parse::(payload, false), tc.expected); + } + }); +} - // act and assert - assert_ok!(SchemasPallet::create_schema_via_governance( - RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), - sender, - create_bounded_schema_vec(r#"{"name":"John Doe"}"#), - ModelType::AvroBinary, - PayloadLocation::Itemized, - BoundedVec::try_from(settings.clone()).unwrap(), - )); +#[test] +fn schema_name_try_parse_with_strict_valid_names_should_succeed() { + new_test_ext().execute_with(|| { + let valid_names = vec!["Abc.a", "a-v.D-D", "aZxcvBnmkjhgfds.asdfghKkloiuyTre"]; + let parsed_names = vec![ + SchemaName { + namespace: SchemaNamespace::try_from("abc".to_string().into_bytes()).unwrap(), + descriptor: SchemaDescriptor::try_from("a".to_string().into_bytes()).unwrap(), + }, + SchemaName { + namespace: SchemaNamespace::try_from("a-v".to_string().into_bytes()).unwrap(), + descriptor: SchemaDescriptor::try_from("d-d".to_string().into_bytes()).unwrap(), + }, + SchemaName { + namespace: SchemaNamespace::try_from("azxcvbnmkjhgfds".to_string().into_bytes()) + .unwrap(), + descriptor: SchemaDescriptor::try_from("asdfghkkloiuytre".to_string().into_bytes()) + .unwrap(), + }, + ]; + for (name, result) in valid_names.iter().zip(parsed_names) { + let payload: SchemaNamePayload = + 
BoundedVec::try_from(name.to_string().into_bytes()).expect("should convert"); + assert_eq!(SchemaName::try_parse::(payload, true), Ok(result)); + } + }); +} - // assert - let res = SchemasPallet::get_schema_by_id(1); - assert_eq!(res.unwrap().settings, settings); - }) +#[test] +fn schema_name_try_parse_with_non_strict_valid_names_should_succeed() { + new_test_ext().execute_with(|| { + let valid_names = vec!["Abc", "a-v", "aZxcvBnmkjhgfds"]; + let parsed_names = vec![ + SchemaName { + namespace: SchemaNamespace::try_from("abc".to_string().into_bytes()).unwrap(), + descriptor: SchemaDescriptor::default(), + }, + SchemaName { + namespace: SchemaNamespace::try_from("a-v".to_string().into_bytes()).unwrap(), + descriptor: SchemaDescriptor::default(), + }, + SchemaName { + namespace: SchemaNamespace::try_from("azxcvbnmkjhgfds".to_string().into_bytes()) + .unwrap(), + descriptor: SchemaDescriptor::default(), + }, + ]; + for (name, result) in valid_names.iter().zip(parsed_names) { + let payload: SchemaNamePayload = + BoundedVec::try_from(name.to_string().into_bytes()).expect("should convert"); + assert_eq!(SchemaName::try_parse::(payload, false), Ok(result)); + } + }); } #[test] -fn create_schema_with_append_only_setting_and_non_itemized_should_fail() { +fn schema_name_get_combined_name_with_valid_names_should_succeed() { new_test_ext().execute_with(|| { - sudo_set_max_schema_size(); + let valid_names = vec!["Abc.a", "a-v.D-D", "aZxcvBnmkjhgfds.asdfghKkloiuyTre"]; + let results = vec!["abc.a", "a-v.d-d", "azxcvbnmkjhgfds.asdfghkkloiuytre"]; + for (name, result) in valid_names.iter().zip(results) { + let payload: SchemaNamePayload = + BoundedVec::try_from(name.to_string().into_bytes()).expect("should convert"); + let parsed = SchemaName::try_parse::(payload, true).expect("should work"); + assert_eq!(parsed.get_combined_name(), result.to_string().into_bytes()); + } + }); +} - // arrange - let settings = vec![SchemaSetting::AppendOnly]; - let sender: AccountId = test_public(1); - // act and assert - assert_noop!( - SchemasPallet::create_schema_via_governance( - RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), - sender.clone(), - create_bounded_schema_vec(r#"{"name":"John Doe"}"#), - ModelType::AvroBinary, - PayloadLocation::Paginated, - BoundedVec::try_from(settings.clone()).unwrap(), - ), - Error::::InvalidSetting +#[test] +fn schema_version_id_add_should_work() { + new_test_ext().execute_with(|| { + let mut val = SchemaVersionId::default(); + let schema_id_1: SchemaId = 55; + let schema_id_2: SchemaId = 200; + let schema_name = SchemaName { + namespace: SchemaNamespace::try_from("abc".to_string().into_bytes()).unwrap(), + descriptor: SchemaDescriptor::try_from("d-d".to_string().into_bytes()).unwrap(), + }; + assert_ok!(val.add::(schema_id_1)); + assert_ok!(val.add::(schema_id_2)); + + let response = val.convert_to_response(&schema_name); + assert_eq!( + response, + vec![ + SchemaVersionResponse { + schema_id: schema_id_1, + schema_version: 1, + schema_name: schema_name.clone().get_combined_name() + }, + SchemaVersionResponse { + schema_id: schema_id_2, + schema_version: 2, + schema_name: schema_name.get_combined_name() + }, + ] ); + }); +} - // act and assert - assert_noop!( - SchemasPallet::create_schema_via_governance( - RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), - sender.clone(), - create_bounded_schema_vec(r#"{"name":"John Doe"}"#), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::try_from(settings.clone()).unwrap(), - ), - 
Error::::InvalidSetting - ); +#[test] +fn schema_version_id_add_with_duplicate_should_fail() { + new_test_ext().execute_with(|| { + let mut val = SchemaVersionId::default(); + let schema_id_1: SchemaId = 55; + + assert_ok!(val.add::(schema_id_1)); + assert_noop!(val.add::(schema_id_1), Error::::SchemaIdAlreadyExists); + }); +} + +#[test] +fn schema_version_id_add_with_max_len_should_fail() { + new_test_ext().execute_with(|| { + let mut val = SchemaVersionId::default(); + for i in 1..=MAX_NUMBER_OF_VERSIONS { + let res = val.add::(i as SchemaId); + assert_eq!(res, Ok(i as SchemaVersion)); + } assert_noop!( - SchemasPallet::create_schema_via_governance( - RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), - sender, - create_bounded_schema_vec(r#"{"name":"John Doe"}"#), - ModelType::AvroBinary, - PayloadLocation::IPFS, - BoundedVec::try_from(settings.clone()).unwrap(), - ), - Error::::InvalidSetting + val.add::((MAX_NUMBER_OF_VERSIONS + 1) as SchemaId), + Error::::ExceedsMaxNumberOfVersions ); - }) + }); } #[test] -fn schemas_migration_to_v2_should_work_as_expected() { +fn create_schema_v3_requires_valid_schema_size() { new_test_ext().execute_with(|| { - // Arrange sudo_set_max_schema_size(); - let sender: AccountId = test_public(5); - let schemas = vec![ - r#"{"Name": "Bond", "Code": "007"}"#, - r#"{"type": "num","minimum": -90,"maximum": 90}"#, - r#"{"latitude": 48.858093,"longitude": 2.294694}"#, + let test_cases: [TestCase<(Error, u8)>; 2] = [ + TestCase { + input: r#"{"a":1}"#, + expected: (Error::::LessThanMinSchemaModelBytes, 3), + }, + TestCase { + input: r#"{"id": "long", "title": "I am a very very very long schema", "properties": "just way too long to live a long life", "description": "Just a never ending stream of bytes that goes on for a minute too long"}"#, + expected: (Error::::ExceedsMaxSchemaModelBytes, 2), + }, ]; - for (idx, fields) in schemas.iter().enumerate() { - assert_ok!(SchemasPallet::create_schema_v2( - RuntimeOrigin::signed(sender.clone()), - create_bounded_schema_vec(fields), - ModelType::AvroBinary, - PayloadLocation::OnChain, - BoundedVec::default() - )); - v2::old::Schemas::::insert( - idx as u16 + 1, - v2::old::Schema { - model_type: ModelType::AvroBinary, - payload_location: PayloadLocation::OnChain, - settings: SchemaSettings::all_disabled(), - model: BoundedVec::try_from(fields.as_bytes().to_vec()) - .expect("should have value"), - }, - ); + for tc in test_cases { + assert_noop!( + SchemasPallet::create_schema_v3(RuntimeOrigin::signed(test_public(1)), create_bounded_schema_vec(tc.input), ModelType::AvroBinary, PayloadLocation::OnChain, BoundedVec::default(), None), + tc.expected.0); } - let old_schema_1 = v2::old::Schemas::::get(1u16).expect("should have value"); - let old_schema_2 = v2::old::Schemas::::get(2u16).expect("should have value"); - let old_schema_3 = v2::old::Schemas::::get(3u16).expect("should have value"); - - // Act - let _ = v2::migrate_to_v2::(); - - // Assert - let old_count = v2::old::Schemas::::iter().count(); - let new_info_count = SchemaInfos::::iter().count(); - let new_payload_count = SchemaPayloads::::iter().count(); - let current_version = SchemasPallet::current_storage_version(); - - assert_eq!(old_count, 0); - assert_eq!(new_info_count, schemas.len()); - assert_eq!(new_payload_count, schemas.len()); - assert_eq!(current_version, StorageVersion::new(2)); - - let schema_info_1 = SchemaInfos::::get(1).expect("should have value"); - let schema_payload_1 = SchemaPayloads::::get(1u16).expect("should have value"); - 
assert_eq!(schema_info_1.model_type, old_schema_1.model_type); - assert_eq!(schema_info_1.payload_location, old_schema_1.payload_location); - assert_eq!(schema_info_1.settings, old_schema_1.settings); - assert_eq!(schema_payload_1.into_inner(), old_schema_1.model.into_inner()); - - let schema_info_2 = SchemaInfos::::get(2).expect("should have value"); - let schema_payload_2 = SchemaPayloads::::get(2u16).expect("should have value"); - assert_eq!(schema_info_2.model_type, old_schema_2.model_type); - assert_eq!(schema_info_2.payload_location, old_schema_2.payload_location); - assert_eq!(schema_info_2.settings, old_schema_2.settings); - assert_eq!(schema_payload_2.into_inner(), old_schema_2.model.into_inner()); - - let schema_info_3 = SchemaInfos::::get(3).expect("should have value"); - let schema_payload_3 = SchemaPayloads::::get(3u16).expect("should have value"); - assert_eq!(schema_info_3.model_type, old_schema_3.model_type); - assert_eq!(schema_info_3.payload_location, old_schema_3.payload_location); - assert_eq!(schema_info_3.settings, old_schema_3.settings); - assert_eq!(schema_payload_3.into_inner(), old_schema_3.model.into_inner()); - }); + }) +} + +#[test] +fn create_schema_v3_happy_path() { + new_test_ext().execute_with(|| { + // arrange + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + let name = "namespace.descriptor"; + let schema_name: SchemaNamePayload = + BoundedVec::try_from(name.to_string().into_bytes()).expect("should convert"); + + // act + assert_ok!(SchemasPallet::create_schema_v3( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + Some(schema_name.clone()), + )); + let res = SchemasPallet::get_schema_by_id(1); + let versions = SchemasPallet::get_schema_versions(schema_name.clone().into_inner()); + + // assert + System::assert_has_event( + AnnouncementEvent::SchemaCreated { key: sender, schema_id: 1 }.into(), + ); + System::assert_last_event( + AnnouncementEvent::SchemaNameCreated { + schema_id: 1, + name: name.to_string().into_bytes(), + } + .into(), + ); + assert_eq!(res.as_ref().is_some(), true); + assert_eq!( + versions, + Some(vec![SchemaVersionResponse { + schema_id: 1, + schema_name: schema_name.into_inner(), + schema_version: 1 + }]) + ); + }) +} + +#[test] +#[serial] +fn create_schema_v3_increments_schema_id_and_version_for_same_name() { + new_test_ext().execute_with(|| { + // arrange + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + let name = "namespace.descriptor"; + let schema_name: SchemaNamePayload = + BoundedVec::try_from(name.to_string().into_bytes()).expect("should convert"); + let mut last_schema_id: SchemaId = 0; + + // act and assert + for fields in [ + r#"{"Name": "Bond", "Code": "007"}"#, + r#"{"type": "num","minimum": -90,"maximum": 90}"#, + r#"{"latitude": 48.858093,"longitude": 2.294694}"#, + ] { + let expected_schema_id = last_schema_id + 1; + assert_ok!(SchemasPallet::create_schema_v3( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(fields), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + Some(schema_name.clone()), + )); + System::assert_has_event( + AnnouncementEvent::SchemaCreated { + key: sender.clone(), + schema_id: expected_schema_id, + } + .into(), + ); + System::assert_last_event( + AnnouncementEvent::SchemaNameCreated { + schema_id: expected_schema_id, + name: name.to_string().into_bytes(), + } + 
.into(), + ); + last_schema_id = expected_schema_id; + } + let versions = SchemasPallet::get_schema_versions(schema_name.clone().into_inner()); + assert_eq!( + versions, + Some(vec![ + SchemaVersionResponse { + schema_id: 1, + schema_name: schema_name.clone().into_inner(), + schema_version: 1 + }, + SchemaVersionResponse { + schema_id: 2, + schema_name: schema_name.clone().into_inner(), + schema_version: 2 + }, + SchemaVersionResponse { + schema_id: 3, + schema_name: schema_name.into_inner(), + schema_version: 3 + } + ]) + ); + }) +} + +#[test] +fn get_schema_versions_for_namespace_should_return_all_descriptors() { + new_test_ext().execute_with(|| { + // arrange + sudo_set_max_schema_size(); + let sender: AccountId = test_public(1); + let namespace = "namespace"; + let name_1 = format!("{}.alice", namespace); + let schema_name_1: SchemaNamePayload = + BoundedVec::try_from(name_1.to_string().into_bytes()).expect("should convert"); + let name_2 = format!("{}.bob", namespace); + let schema_name_2: SchemaNamePayload = + BoundedVec::try_from(name_2.to_string().into_bytes()).expect("should convert"); + assert_ok!(SchemasPallet::create_schema_v3( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + Some(schema_name_1.clone()), + )); + assert_ok!(SchemasPallet::create_schema_v3( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + Some(schema_name_2.clone()), + )); + + // act + let versions = SchemasPallet::get_schema_versions(String::from(namespace).into_bytes()); + + // assert + assert!(versions.is_some()); + + let mut inner = versions.clone().unwrap(); + inner.sort_by(|a, b| a.schema_name.cmp(&b.schema_name)); + assert_eq!( + versions, + Some(vec![ + SchemaVersionResponse { + schema_id: 1, + schema_name: schema_name_1.into_inner(), + schema_version: 1 + }, + SchemaVersionResponse { + schema_id: 2, + schema_name: schema_name_2.into_inner(), + schema_version: 1 + }, + ]) + ); + }) +} + +#[test] +fn create_schema_via_governance_v2_happy_path() { + new_test_ext().execute_with(|| { + // arrange + sudo_set_max_schema_size(); + let settings = vec![SchemaSetting::AppendOnly]; + let sender: AccountId = test_public(5); + let name = "namespace.descriptor"; + let schema_name: SchemaNamePayload = + BoundedVec::try_from(name.to_string().into_bytes()).expect("should convert"); + + // act + assert_ok!(SchemasPallet::create_schema_via_governance_v2( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender, + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::Itemized, + BoundedVec::try_from(settings.clone()).unwrap(), + Some(schema_name.clone()), + )); + + // assert + let res = SchemasPallet::get_schema_by_id(1); + let versions = SchemasPallet::get_schema_versions(schema_name.clone().into_inner()); + + assert_eq!(res.unwrap().settings, settings); + assert_eq!( + versions, + Some(vec![SchemaVersionResponse { + schema_id: 1, + schema_name: schema_name.into_inner(), + schema_version: 1 + }]) + ); + }) +} + +#[test] +fn create_schema_via_governance_v2_with_append_only_setting_and_non_itemized_should_fail() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + + // arrange + let settings = vec![SchemaSetting::AppendOnly]; + let sender: AccountId = 
test_public(1); + // act and assert + assert_noop!( + SchemasPallet::create_schema_via_governance_v2( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender.clone(), + create_bounded_schema_vec(r#"{"name":"John Doe"}"#), + ModelType::AvroBinary, + PayloadLocation::Paginated, + BoundedVec::try_from(settings.clone()).unwrap(), + None, + ), + Error::::InvalidSetting + ); + + // act and assert + assert_noop!( + SchemasPallet::create_schema_via_governance_v2( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender.clone(), + create_bounded_schema_vec(r#"{"name":"John Doe"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::try_from(settings.clone()).unwrap(), + None, + ), + Error::::InvalidSetting + ); + + assert_noop!( + SchemasPallet::create_schema_via_governance_v2( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(2, 3)), + sender, + create_bounded_schema_vec(r#"{"name":"John Doe"}"#), + ModelType::AvroBinary, + PayloadLocation::IPFS, + BoundedVec::try_from(settings.clone()).unwrap(), + None, + ), + Error::::InvalidSetting + ); + }) +} + +/// Test that a request to be a provider, makes the MSA a provider after the council approves it. +#[test] +fn propose_to_create_schema_v2_happy_path() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + + let test_model = r#"{"foo": "bar", "bar": "buzz"}"#; + let serialized_fields = Vec::from(test_model.as_bytes()); + let schema_name = + SchemaNamePayload::try_from("namespace.descriptor".to_string().into_bytes()) + .expect("should work"); + // Propose a new schema + _ = SchemasPallet::propose_to_create_schema_v2( + test_origin_signed(5), + create_bounded_schema_vec(test_model), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + Some(schema_name.clone()), + ); + + // Find the Proposed event and get it's hash and index so it can be voted on + let proposed_events: Vec<(u32, Hash)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Proposed { + account: _, + proposal_index, + proposal_hash, + threshold: _, + }) => Some((proposal_index, proposal_hash)), + _ => None, + }) + .collect(); + + assert_eq!(proposed_events.len(), 1); + + let proposal_index = proposed_events[0].0; + let proposal_hash = proposed_events[0].1; + let proposal = Council::proposal_of(proposal_hash).unwrap(); + let proposal_len: u32 = proposal.encoded_size() as u32; + + // Set up the council members + let council_member_1 = test_public(1); // Use ALICE as a council member + let council_member_2 = test_public(2); // Use BOB as a council member + let council_member_3 = test_public(3); // Use CHARLIE as a council member + + let incoming = vec![]; + let outgoing = vec![]; + Council::change_members( + &incoming, + &outgoing, + vec![council_member_1.clone(), council_member_2.clone(), council_member_3.clone()], + ); + + // Council member #1 votes AYE on the proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_1.clone()), + proposal_hash, + proposal_index, + true + )); + // Council member #2 votes AYE on the proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_2.clone()), + proposal_hash, + proposal_index, + true + )); + // Council member #3 votes NAY on the proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_3.clone()), + proposal_hash, + proposal_index, + false + )); + + // Find the Voted event and check if it passed + let voted_events: 
Vec<(bool, u32, u32)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Voted { + account: _, + proposal_hash: _, + voted, + yes, + no, + }) => Some((voted, yes, no)), + _ => None, + }) + .collect(); + + assert_eq!(voted_events.len(), 3); + assert_eq!(voted_events[1].1, 2); // There should be two AYE (out of three) votes to pass + + // Close the voting + assert_ok!(Council::close( + RuntimeOrigin::signed(test_public(5)), + proposal_hash, + proposal_index, + Weight::MAX, + proposal_len + )); + + // Find the Closed event and check if it passed + let closed_events: Vec<(u32, u32)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Closed { + proposal_hash: _, + yes, + no, + }) => Some((yes, no)), + _ => None, + }) + .collect(); + + assert_eq!(closed_events.len(), 1); + assert_eq!(closed_events[0].0, 2); // There should be two YES votes to pass + + // Find the SchemaCreated event and check if it passed + let schema_events: Vec = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::SchemasPallet(AnnouncementEvent::SchemaCreated { + key: _, + schema_id, + }) | + RuntimeEvent::SchemasPallet(AnnouncementEvent::SchemaNameCreated { + name: _, + schema_id, + }) => Some(schema_id), + _ => None, + }) + .collect(); + + // Confirm that the schema was created + assert_eq!(schema_events.len(), 2); + + let last_schema_id = schema_events[0]; + let created_schema = SchemasPallet::get_schema_by_id(last_schema_id); + assert_eq!(created_schema.as_ref().is_some(), true); + assert_eq!(created_schema.as_ref().unwrap().clone().model, serialized_fields); + }) +} + +#[test] +fn propose_to_create_schema_name_happy_path() { + new_test_ext().execute_with(|| { + sudo_set_max_schema_size(); + let schema_name = + SchemaNamePayload::try_from("namespace.descriptor".to_string().into_bytes()) + .expect("should work"); + let sender: AccountId = test_public(1); + + assert_ok!(SchemasPallet::create_schema_v3( + RuntimeOrigin::signed(sender.clone()), + create_bounded_schema_vec(r#"{"name": "Doe", "type": "lost"}"#), + ModelType::AvroBinary, + PayloadLocation::OnChain, + BoundedVec::default(), + None, + )); + // Propose a new schema + _ = SchemasPallet::propose_to_create_schema_name( + test_origin_signed(5), + 1, + schema_name.clone(), + ); + + // Find the Proposed event and get it's hash and index so it can be voted on + let proposed_events: Vec<(u32, Hash)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Proposed { + account: _, + proposal_index, + proposal_hash, + threshold: _, + }) => Some((proposal_index, proposal_hash)), + _ => None, + }) + .collect(); + + assert_eq!(proposed_events.len(), 1); + + let proposal_index = proposed_events[0].0; + let proposal_hash = proposed_events[0].1; + let proposal = Council::proposal_of(proposal_hash).unwrap(); + let proposal_len: u32 = proposal.encoded_size() as u32; + + // Set up the council members + let council_member_1 = test_public(1); // Use ALICE as a council member + let council_member_2 = test_public(2); // Use BOB as a council member + let council_member_3 = test_public(3); // Use CHARLIE as a council member + + let incoming = vec![]; + let outgoing = vec![]; + Council::change_members( + &incoming, + &outgoing, + vec![council_member_1.clone(), council_member_2.clone(), council_member_3.clone()], + ); + + // Council member #1 votes AYE on the 
proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_1.clone()), + proposal_hash, + proposal_index, + true + )); + // Council member #2 votes AYE on the proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_2.clone()), + proposal_hash, + proposal_index, + true + )); + // Council member #3 votes NAY on the proposal + assert_ok!(Council::vote( + RuntimeOrigin::signed(council_member_3.clone()), + proposal_hash, + proposal_index, + false + )); + + // Find the Voted event and check if it passed + let voted_events: Vec<(bool, u32, u32)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Voted { + account: _, + proposal_hash: _, + voted, + yes, + no, + }) => Some((voted, yes, no)), + _ => None, + }) + .collect(); + + assert_eq!(voted_events.len(), 3); + assert_eq!(voted_events[1].1, 2); // There should be two AYE (out of three) votes to pass + + // Close the voting + assert_ok!(Council::close( + RuntimeOrigin::signed(test_public(5)), + proposal_hash, + proposal_index, + Weight::MAX, + proposal_len + )); + + // Find the Closed event and check if it passed + let closed_events: Vec<(u32, u32)> = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::Council(pallet_collective::Event::Closed { + proposal_hash: _, + yes, + no, + }) => Some((yes, no)), + _ => None, + }) + .collect(); + + assert_eq!(closed_events.len(), 1); + assert_eq!(closed_events[0].0, 2); // There should be two YES votes to pass + + // Find the SchemaCreated event and check if it passed + let schema_events: Vec = System::events() + .iter() + .filter_map(|event| match event.event { + RuntimeEvent::SchemasPallet(AnnouncementEvent::SchemaNameCreated { + name: _, + schema_id, + }) => Some(schema_id), + _ => None, + }) + .collect(); + + // Confirm that the schema was created + assert_eq!(schema_events.len(), 1); + + let versions = SchemasPallet::get_schema_versions(schema_name.clone().into_inner()); + assert_eq!( + versions, + Some(vec![SchemaVersionResponse { + schema_id: 1, + schema_name: schema_name.into_inner(), + schema_version: 1 + }]) + ); + }) } diff --git a/pallets/schemas/src/types.rs b/pallets/schemas/src/types.rs index a9df238733..f58502317c 100644 --- a/pallets/schemas/src/types.rs +++ b/pallets/schemas/src/types.rs @@ -1,12 +1,42 @@ //! Types for the Schema Pallet -use common_primitives::schema::{ModelType, PayloadLocation, SchemaSettings}; -use frame_support::traits::StorageVersion; +use crate::{Config, Error}; +use common_primitives::schema::{ + ModelType, PayloadLocation, SchemaId, SchemaSettings, SchemaVersion, SchemaVersionResponse, +}; +use frame_support::{ensure, pallet_prelude::ConstU32, traits::StorageVersion, BoundedVec}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use sp_runtime::DispatchError; use sp_std::fmt::Debug; +extern crate alloc; +use alloc::string::String; +use frame_support::traits::Len; +use sp_std::{vec, vec::*}; /// Current storage version of the schemas pallet. 
-pub const SCHEMA_STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
+pub const SCHEMA_STORAGE_VERSION: StorageVersion = StorageVersion::new(3);
+
+/// The maximum size of schema name including all parts
+pub const SCHEMA_NAME_BYTES_MAX: u32 = 32; // Hard limit of 32 bytes
+/// A schema name following the structure NAMESPACE.DESCRIPTOR
+pub type SchemaNamePayload = BoundedVec<u8, ConstU32<SCHEMA_NAME_BYTES_MAX>>;
+/// schema namespace type
+pub type SchemaNamespace = BoundedVec<u8, ConstU32<NAMESPACE_MAX>>;
+/// schema descriptor type
+pub type SchemaDescriptor = BoundedVec<u8, ConstU32<DESCRIPTOR_MAX>>;
+/// The minimum size of a namespace in schema
+pub const NAMESPACE_MIN: u32 = 3;
+/// The maximum size of a namespace in schema
+pub const NAMESPACE_MAX: u32 = SCHEMA_NAME_BYTES_MAX - (DESCRIPTOR_MIN + 1);
+/// The minimum size of a schema descriptor
+pub const DESCRIPTOR_MIN: u32 = 1;
+/// The maximum size of a schema descriptor
+pub const DESCRIPTOR_MAX: u32 = SCHEMA_NAME_BYTES_MAX - (NAMESPACE_MIN + 1);
+/// separator character
+pub const SEPARATOR_CHAR: char = '.';
+/// maximum number of versions for a certain schema name
+/// -1 is to avoid overflow when converting the (index + 1) to `SchemaVersion` in `SchemaVersionId`
+pub const MAX_NUMBER_OF_VERSIONS: u32 = SchemaVersion::MAX as u32 - 1;

 #[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)]
 /// A structure defining a Schema information (excluding the payload)
@@ -17,4 +47,129 @@ pub struct SchemaInfo {
 	pub payload_location: PayloadLocation,
 	/// additional control settings for the schema
 	pub settings: SchemaSettings,
+	/// Defines if a schema has a name or not
+	pub has_name: bool,
 }
+
+#[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen)]
+/// A structure defining the name of a schema
+pub struct SchemaName {
+	/// namespace or domain of the schema
+	pub namespace: SchemaNamespace,
+	/// name or descriptor of this schema
+	pub descriptor: SchemaDescriptor,
+}
+
+#[derive(Clone, Encode, Decode, PartialEq, Debug, TypeInfo, Eq, MaxEncodedLen, Default)]
+/// A structure mapping a schema name to its schema ids and versions
+pub struct SchemaVersionId {
+	/// the index of each item + 1 is considered as their version.
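+	/// Versions are dense and 1-based; the latest version is always `ids.len()`.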
+ /// Ex: the schemaId located in `ids[2]` is for version number 3 + pub ids: BoundedVec>, +} + +impl SchemaName { + /// parses and verifies the request and returns the SchemaName type if successful + pub fn try_parse( + payload: SchemaNamePayload, + is_strict: bool, + ) -> Result { + // check if all ascii + let mut str = String::from_utf8(payload.into_inner()) + .map_err(|_| Error::::InvalidSchemaNameEncoding)?; + ensure!(str.is_ascii(), Error::::InvalidSchemaNameEncoding); + + // to canonical form + str = String::from(str.to_lowercase().trim()); + + // check if alphabetic or - or separator character + ensure!( + str.chars().all(|c| c.is_ascii_alphabetic() || c == '-' || c == SEPARATOR_CHAR), + Error::::InvalidSchemaNameCharacters + ); + + // split to namespace and descriptor + let chunks: Vec<_> = str.split(SEPARATOR_CHAR).collect(); + ensure!( + chunks.len() == 2 || (chunks.len() == 1 && !is_strict), + Error::::InvalidSchemaNameStructure + ); + + // check namespace + let namespace = BoundedVec::try_from(chunks[0].as_bytes().to_vec()) + .map_err(|_| Error::::InvalidSchemaNamespaceLength)?; + ensure!(NAMESPACE_MIN <= namespace.len() as u32, Error::::InvalidSchemaNamespaceLength); + // should not start or end with - + ensure!( + !(namespace.starts_with(b"-") || namespace.ends_with(b"-")), + Error::::InvalidSchemaNameStructure + ); + + // check descriptor + let descriptor = match chunks.len() == 2 { + true => { + let descriptor = BoundedVec::try_from(chunks[1].as_bytes().to_vec()) + .map_err(|_| Error::::InvalidSchemaDescriptorLength)?; + ensure!( + DESCRIPTOR_MIN <= descriptor.len() as u32, + Error::::InvalidSchemaDescriptorLength + ); + // should not start or end with - + ensure!( + !(descriptor.starts_with(b"-") || descriptor.ends_with(b"-")), + Error::::InvalidSchemaNameStructure + ); + descriptor + }, + false => BoundedVec::default(), + }; + + Ok(SchemaName { namespace, descriptor }) + } + + /// get the combined name namespace.descriptor + pub fn get_combined_name(&self) -> Vec { + vec![ + self.namespace.clone().into_inner(), + vec![SEPARATOR_CHAR as u8], + self.descriptor.clone().into_inner(), + ] + .concat() + } + + /// creates a new SchemaName using provided descriptor + pub fn new_with_descriptor(&self, descriptor: SchemaDescriptor) -> Self { + Self { namespace: self.namespace.clone(), descriptor } + } + + /// returns true if the descriptor exists + pub fn descriptor_exists(&self) -> bool { + self.descriptor.len() > 0 + } +} + +impl SchemaVersionId { + /// adds a new schema id and returns the version for that schema_id + pub fn add(&mut self, schema_id: SchemaId) -> Result { + let is_new = !self.ids.iter().any(|id| id == &schema_id); + ensure!(is_new, Error::::SchemaIdAlreadyExists); + self.ids + .try_push(schema_id) + .map_err(|_| Error::::ExceedsMaxNumberOfVersions)?; + let version = self.ids.len() as SchemaVersion; + Ok(version) + } + + /// convert into a response vector + pub fn convert_to_response(&self, schema_name: &SchemaName) -> Vec { + self.ids + .iter() + .enumerate() + .map(|(index, schema_id)| SchemaVersionResponse { + schema_name: schema_name.get_combined_name(), + schema_id: *schema_id, + schema_version: (index + 1) as SchemaVersion, + }) + .collect() + } } diff --git a/pallets/schemas/src/weights.rs b/pallets/schemas/src/weights.rs index 8db058944e..bd969e6849 100644 --- a/pallets/schemas/src/weights.rs +++ b/pallets/schemas/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for pallet_schemas //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-14, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-30, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `benchmark-runner-44wtw-sz2gt`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` +//! HOSTNAME: `benchmark-runner-44wtw-l2h9r`, CPU: `Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz` //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("frequency-bench"), DB CACHE: 1024 // Executed Command: @@ -53,7 +53,12 @@ pub trait WeightInfo { fn create_schema_via_governance(m: u32, ) -> Weight; fn propose_to_create_schema(m: u32, ) -> Weight; fn create_schema_v2(m: u32, ) -> Weight; + fn create_schema_v3(m: u32, ) -> Weight; fn set_max_schema_model_bytes() -> Weight; + fn create_schema_via_governance_v2(m: u32, ) -> Weight; + fn propose_to_create_schema_v2(m: u32, ) -> Weight; + fn propose_to_create_schema_name() -> Weight; + fn create_schema_name_via_governance() -> Weight; } /// Weights for pallet_schemas using the Substrate node and recommended hardware. @@ -64,7 +69,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaInfos` (r:0 w:1) - /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. @@ -72,10 +77,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `2974` - // Minimum execution time: 15_561_000 picoseconds. - Weight::from_parts(15_783_000, 2974) - // Standard Error: 49 - .saturating_add(Weight::from_parts(34_744, 0).saturating_mul(m.into())) + // Minimum execution time: 15_644_000 picoseconds. + Weight::from_parts(15_766_000, 2974) + // Standard Error: 44 + .saturating_add(Weight::from_parts(33_034, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -84,7 +89,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaInfos` (r:0 w:1) - /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. @@ -92,10 +97,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `2974` - // Minimum execution time: 15_899_000 picoseconds. 
- Weight::from_parts(16_004_000, 2974) - // Standard Error: 53 - .saturating_add(Weight::from_parts(34_628, 0).saturating_mul(m.into())) + // Minimum execution time: 15_531_000 picoseconds. + Weight::from_parts(15_680_000, 2974) + // Standard Error: 42 + .saturating_add(Weight::from_parts(32_973, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -114,10 +119,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `230` // Estimated: `5180` - // Minimum execution time: 21_420_000 picoseconds. - Weight::from_parts(10_115_148, 5180) + // Minimum execution time: 20_821_000 picoseconds. + Weight::from_parts(9_707_541, 5180) // Standard Error: 33 - .saturating_add(Weight::from_parts(3_133, 0).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(3_080, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -126,7 +131,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaInfos` (r:0 w:1) - /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. @@ -134,21 +139,121 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `136` // Estimated: `2974` - // Minimum execution time: 15_700_000 picoseconds. - Weight::from_parts(15_894_000, 2974) - // Standard Error: 46 - .saturating_add(Weight::from_parts(34_920, 0).saturating_mul(m.into())) + // Minimum execution time: 15_677_000 picoseconds. + Weight::from_parts(15_790_000, 2974) + // Standard Error: 42 + .saturating_add(Weight::from_parts(32_935, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } + /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:1 w:0) + /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) + /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaNameToIds` (r:1 w:1) + /// Proof: `Schemas::SchemaNameToIds` (`max_values`: None, `max_size`: Some(602), added: 3077, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) + /// The range of component `m` is `[16, 65499]`. + fn create_schema_v3(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `136` + // Estimated: `5552` + // Minimum execution time: 24_859_000 picoseconds. 
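+	// Base weight is higher than `create_schema_v2` because of the extra `SchemaNameToIds` read and write.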
+ Weight::from_parts(6_368_803, 5552) + // Standard Error: 59 + .saturating_add(Weight::from_parts(33_540, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:0 w:1) /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_max_schema_model_bytes() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_806_000 picoseconds. - Weight::from_parts(7_135_000, 0) + // Minimum execution time: 6_682_000 picoseconds. + Weight::from_parts(6_932_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:1 w:0) + /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) + /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaNameToIds` (r:1 w:1) + /// Proof: `Schemas::SchemaNameToIds` (`max_values`: None, `max_size`: Some(602), added: 3077, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) + /// The range of component `m` is `[16, 65499]`. + fn create_schema_via_governance_v2(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `136` + // Estimated: `5552` + // Minimum execution time: 25_098_000 picoseconds. + Weight::from_parts(6_990_638, 5552) + // Standard Error: 64 + .saturating_add(Weight::from_parts(33_985, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[16, 65499]`. + fn propose_to_create_schema_v2(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `230` + // Estimated: `5180` + // Minimum execution time: 21_655_000 picoseconds. 
+ Weight::from_parts(10_381_585, 5180) + // Standard Error: 32 + .saturating_add(Weight::from_parts(3_063, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn propose_to_create_schema_name() -> Weight { + // Proof Size summary in bytes: + // Measured: `433` + // Estimated: `5383` + // Minimum execution time: 27_584_000 picoseconds. + Weight::from_parts(28_187_000, 5383) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaNameToIds` (r:1 w:1) + /// Proof: `Schemas::SchemaNameToIds` (`max_values`: None, `max_size`: Some(602), added: 3077, mode: `MaxEncodedLen`) + fn create_schema_name_via_governance() -> Weight { + // Proof Size summary in bytes: + // Measured: `203` + // Estimated: `5552` + // Minimum execution time: 17_093_000 picoseconds. + Weight::from_parts(17_513_000, 5552) + .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -160,7 +265,7 @@ impl WeightInfo for () { /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaInfos` (r:0 w:1) - /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. @@ -168,10 +273,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `136` // Estimated: `2974` - // Minimum execution time: 15_561_000 picoseconds. - Weight::from_parts(15_783_000, 2974) - // Standard Error: 49 - .saturating_add(Weight::from_parts(34_744, 0).saturating_mul(m.into())) + // Minimum execution time: 15_644_000 picoseconds. 
+ Weight::from_parts(15_766_000, 2974) + // Standard Error: 44 + .saturating_add(Weight::from_parts(33_034, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -180,7 +285,7 @@ impl WeightInfo for () { /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaInfos` (r:0 w:1) - /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. @@ -188,10 +293,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `136` // Estimated: `2974` - // Minimum execution time: 15_899_000 picoseconds. - Weight::from_parts(16_004_000, 2974) - // Standard Error: 53 - .saturating_add(Weight::from_parts(34_628, 0).saturating_mul(m.into())) + // Minimum execution time: 15_531_000 picoseconds. + Weight::from_parts(15_680_000, 2974) + // Standard Error: 42 + .saturating_add(Weight::from_parts(32_973, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -210,10 +315,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `230` // Estimated: `5180` - // Minimum execution time: 21_420_000 picoseconds. - Weight::from_parts(10_115_148, 5180) + // Minimum execution time: 20_821_000 picoseconds. + Weight::from_parts(9_707_541, 5180) // Standard Error: 33 - .saturating_add(Weight::from_parts(3_133, 0).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(3_080, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -222,7 +327,7 @@ impl WeightInfo for () { /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaInfos` (r:0 w:1) - /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) /// The range of component `m` is `[16, 65499]`. @@ -230,21 +335,121 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `136` // Estimated: `2974` - // Minimum execution time: 15_700_000 picoseconds. - Weight::from_parts(15_894_000, 2974) - // Standard Error: 46 - .saturating_add(Weight::from_parts(34_920, 0).saturating_mul(m.into())) + // Minimum execution time: 15_677_000 picoseconds. 
+ Weight::from_parts(15_790_000, 2974) + // Standard Error: 42 + .saturating_add(Weight::from_parts(32_935, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } + /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:1 w:0) + /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) + /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaNameToIds` (r:1 w:1) + /// Proof: `Schemas::SchemaNameToIds` (`max_values`: None, `max_size`: Some(602), added: 3077, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) + /// The range of component `m` is `[16, 65499]`. + fn create_schema_v3(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `136` + // Estimated: `5552` + // Minimum execution time: 24_859_000 picoseconds. + Weight::from_parts(6_368_803, 5552) + // Standard Error: 59 + .saturating_add(Weight::from_parts(33_540, 0).saturating_mul(m.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:0 w:1) /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_max_schema_model_bytes() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_806_000 picoseconds. - Weight::from_parts(7_135_000, 0) + // Minimum execution time: 6_682_000 picoseconds. + Weight::from_parts(6_932_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Schemas::GovernanceSchemaModelMaxBytes` (r:1 w:0) + /// Proof: `Schemas::GovernanceSchemaModelMaxBytes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Schemas::CurrentSchemaIdentifierMaximum` (r:1 w:1) + /// Proof: `Schemas::CurrentSchemaIdentifierMaximum` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaNameToIds` (r:1 w:1) + /// Proof: `Schemas::SchemaNameToIds` (`max_values`: None, `max_size`: Some(602), added: 3077, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaInfos` (r:0 w:1) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaPayloads` (r:0 w:1) + /// Proof: `Schemas::SchemaPayloads` (`max_values`: None, `max_size`: Some(65514), added: 67989, mode: `MaxEncodedLen`) + /// The range of component `m` is `[16, 65499]`. + fn create_schema_via_governance_v2(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `136` + // Estimated: `5552` + // Minimum execution time: 25_098_000 picoseconds. 
+ Weight::from_parts(6_990_638, 5552) + // Standard Error: 64 + .saturating_add(Weight::from_parts(33_985, 0).saturating_mul(m.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[16, 65499]`. + fn propose_to_create_schema_v2(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `230` + // Estimated: `5180` + // Minimum execution time: 21_655_000 picoseconds. + Weight::from_parts(10_381_585, 5180) + // Standard Error: 32 + .saturating_add(Weight::from_parts(3_063, 0).saturating_mul(m.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Council::Members` (r:1 w:0) + /// Proof: `Council::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalOf` (r:1 w:1) + /// Proof: `Council::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Council::Proposals` (r:1 w:1) + /// Proof: `Council::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::ProposalCount` (r:1 w:1) + /// Proof: `Council::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Council::Voting` (r:0 w:1) + /// Proof: `Council::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn propose_to_create_schema_name() -> Weight { + // Proof Size summary in bytes: + // Measured: `433` + // Estimated: `5383` + // Minimum execution time: 27_584_000 picoseconds. + Weight::from_parts(28_187_000, 5383) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Schemas::SchemaInfos` (r:1 w:0) + /// Proof: `Schemas::SchemaInfos` (`max_values`: None, `max_size`: Some(15), added: 2490, mode: `MaxEncodedLen`) + /// Storage: `Schemas::SchemaNameToIds` (r:1 w:1) + /// Proof: `Schemas::SchemaNameToIds` (`max_values`: None, `max_size`: Some(602), added: 3077, mode: `MaxEncodedLen`) + fn create_schema_name_via_governance() -> Weight { + // Proof Size summary in bytes: + // Measured: `203` + // Estimated: `5552` + // Minimum execution time: 17_093_000 picoseconds. 
+ Weight::from_parts(17_513_000, 5552) + .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } diff --git a/runtime/frequency/src/lib.rs b/runtime/frequency/src/lib.rs index f553726c91..6e7497a2fc 100644 --- a/runtime/frequency/src/lib.rs +++ b/runtime/frequency/src/lib.rs @@ -25,13 +25,7 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use common_primitives::{ - handles::*, - messages::*, - msa::*, - node::*, - rpc::RpcEvent, - schema::{PayloadLocation, SchemaId, SchemaResponse}, - stateful_storage::*, + handles::*, messages::*, msa::*, node::*, rpc::RpcEvent, schema::*, stateful_storage::*, }; pub use common_runtime::{ @@ -139,6 +133,7 @@ impl Contains for BaseCallFilter { RuntimeCall::Msa(pallet_msa::Call::create_provider { .. }) => false, RuntimeCall::Schemas(pallet_schemas::Call::create_schema { .. }) => false, RuntimeCall::Schemas(pallet_schemas::Call::create_schema_v2 { .. }) => false, + RuntimeCall::Schemas(pallet_schemas::Call::create_schema_v3 { .. }) => false, // Everything else is allowed on Mainnet _ => true, } @@ -170,6 +165,7 @@ impl BaseCallFilter { RuntimeCall::Msa(pallet_msa::Call::create_provider { .. }) | RuntimeCall::Schemas(pallet_schemas::Call::create_schema { .. }) | RuntimeCall::Schemas(pallet_schemas::Call::create_schema_v2 { .. }) => false, + RuntimeCall::Schemas(pallet_schemas::Call::create_schema_v3 { .. }) => false, // Block `Pays::No` calls from utility batch _ if Self::is_pays_no_call(call) => false, @@ -222,8 +218,8 @@ pub type Executive = frame_executive::Executive< AllPalletsWithSystem, ( pallet_messages::migration::v2::MigrateToV2, - pallet_schemas::migration::v2::MigrateToV2, pallet_capacity::migration::v2::MigrateToV2, + pallet_schemas::migration::v3::MigrateToV3, ), >; @@ -262,7 +258,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 65, + spec_version: 66, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -276,7 +272,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("frequency-rococo"), impl_name: create_runtime_str!("frequency"), authoring_version: 1, - spec_version: 65, + spec_version: 66, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -1266,6 +1262,10 @@ impl_runtime_apis! { fn get_by_schema_id(schema_id: SchemaId) -> Option { Schemas::get_schema_by_id(schema_id) } + + fn get_schema_versions_by_name(schema_name: Vec) -> Option> { + Schemas::get_schema_versions(schema_name) + } } impl system_runtime_api::AdditionalRuntimeApi for Runtime { From c4258d1a276afaf0061781494eb8846c8fae2b3b Mon Sep 17 00:00:00 2001 From: Aramik Date: Tue, 5 Dec 2023 10:40:37 -0800 Subject: [PATCH 2/3] init: security md (#1797) # Goal The goal of this PR is to create a document to guide the public about how and what to report as a security vulnerability. Closes #559 # Discussions - Should we put anything about bounty program even though it's not ready? - How about any legal stuff? # Checklist - [x] Doc(s) updated - [x] PGP key generated --- README.md | 2 +- SECURITY.md | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 SECURITY.md diff --git a/README.md b/README.md index 71ee26c963..3ebdf8cb27 100644 --- a/README.md +++ b/README.md @@ -334,7 +334,7 @@ Please check out [the information here](./CONTRIBUTING.md). 
# Security Issue Reporting Do you know of an on-chain vulnerability (or possible one) that can lead to economic loss, privacy loss, or instability of the network? -Please report it to [security@frequency.xyz](mailto:security@frequency.xyz) +Please report it by following the steps mentioned in [here](./SECURITY.md). # Additional Resources diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..f64fd8a6a1 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,91 @@ +# Security Policy + +We appreciate the efforts of security researchers and the broader community in helping us maintain a +secure environment for our users. If you discover a security vulnerability, we kindly request that +you report it to us privately before disclosing it publicly. This allows us to address the issue +promptly and protect our users. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report the vulnerability via [https://github.com/LibertyDSNP/frequency/security/advisories/new](https://github.com/LibertyDSNP/frequency/security/advisories/new). + +Alternatively, you can send email to [security@frequency.xyz](mailto:security@frequency.xyz). If +possible, encrypt your message with our PGP key; you can download it from [OpenGPG key server](https://keys.openpgp.org/vks/v1/by-fingerprint/0E50AE7CFD8195999CF45370B766E94411B9B734) +or copy from below. + +You should receive a response within 48 hours. If for some reason you do not, please follow up via +email to ensure we received your original message. + +## Report details +Please include the requested information listed below (as much as you can provide) to help us better +understand the nature and scope of the possible issue: + +- Your name and contact information +- Description of the vulnerability +- Attack scenario (if any) +- Step-by-step instructions to reproduce the issue +- Any other details + +**Reminder: This process is only for security related vulnerabilities and if this is a generic issue +(no security implications) with Frequency or Polkadot-sdk or any other dependencies please open a +regular GitHub issue.** + + +## Plaintext PGP Key + +``` +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGVt5AsBEADN7Hu5MDzC+J2omxN2cJXiQlgtxyMet6eUcUvtvtmF8viVYLaG +Bf3pGFvsSX3oQEGVMj0Xwby9PdyA7y4eIgIZfAG6qBELpeOvum7LL2N6qbU0GPsY +8fl+aejxAPiWbg8jJRDVmIPi18FOV3xhU6D8VWSToYLEt/Gqw7hAAiW3pRNJIJX5 +0VpR7N6ZNZwY+yj/Dwx7g9YaWyVDiRYxhQ5zRK7OhdaH6BV6YWeY0QmTmaeJBFrb +WGBU7ub+/LmOGw7JG3Aqi3seyD39SpE3sae2rFpXrX4bNfkKi4nTTNm3P4+2Zr42 +uMKFlmCLiFna01/DD+yT6FOF4ovCyCQF+Vzezz593Phj8Cl1vNOFWLc8b77tNZjk +UnXGwdCXKC/FLWng0ASy4zHcJiPrC/i+8Yhr+o+jJIraG+yDaO1T1VWFOlFu7JBl +QZb+rSnnonfxzWSx7I6ug3nO4y0DKRgOt8tzKvMJPVkhLEIZibPzYMBM7hpycqfo +Cb4EvWUkEuK7VhSqZ56MsQ+ziF4VYDZn4t+7uB8WuJ9xo3Xd1R2Cl2gUTP7aEBCj +N6KGiddXbNFDN4UsVveKHrRwPHpjEiOy+e32CafoyG7+dJWX3klg/Os1hCkjw3hj +iOKBUUGdtELiMPqt84qmaGrQRaIy0p/2JEn7PSjJ2HAwzCL6CMwiMy1eAwARAQAB +tDVGcmVxdWVuY3kgU2VjdXJpdHkgUmVwb3J0aW5nIDxzZWN1cml0eUBmcmVxdWVu +Y3kueHl6PokCVAQTAQgAPhYhBA5Qrnz9gZWZnPRTcLdm6UQRubc0BQJlbeQLAhsD +BQkH0pYMBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJELdm6UQRubc0nmoP/2Dt +eDthwWTXVwoExxOJZ/035aq+wgIQ9Z04S2GyOgV8eNYIxSRiDfvhoyezJmtVsJNW +f4dyHYdhpC2yMlcT3jWFXJEQc4y72enhiyUzRHHrKuNLNioquV+3l614v5zhuROS +Yx/lUMHCgyg5viUe2EUyyhss/OkxLUrLT8S9Ggn4Aaf6eGUixag4DvLf7XWP8JhF +4trX5BSiCYwVhmKPu8HnxNa2TXBdprm/SnRTDmyYXsF/1MVbmaIaNzvTi0Tt+5iQ +tIjfxoCV5mX6eNPogQdkvpdBk2SSAO0CTLMP/qFVI2nsuTmyPuCO6dx+Sr8eaG2u 
+Aumk/tU2Rra5V9Mc0SSwYQj8nTMcgBh2PeIuVmOdznh18s/4/2Fp0teizjnqzbU0 +dreRNsqooVPqifumh2BoaNZcKOnuBp+/2xZid2B6VEjLFYHlrKmajQK8V1P0AHV+ +HFi6+z1Ahge6JCCl0MfhoO13wepmgBkyyhPnUR8pbSeUGqA9iYkJ8q+LeXztxiIO +rJ0f7Y0lpJBWkU6nu4LNdL8qOUDLZx2BKZZGjzemGV05vKh/aS2cv0Z6z/YWY97e +M0wGWp9LELxzGBpp0F9HkJ84rFEyW/fZ3f4yDSrwaRsb7eCrrWpBiqKsVj4vKfZz +XE5oWhk0eAFFMAcPCcRemlMTKEW1lt6kfF/8xLYBuQINBGVt5AsBEAC/mB66pftw +YCW4/PXAWI+TQk5/iR9DPX0RHNd8d7B41qo5KOnu8FGdGJuk60fR26C9qyMJOBxz +HhbEPlE5UHyaUn/QiDOyzB1eZmwSUH38Y5PTgXM1ZvB0taLjkCaNIe+AeEqMx6NT +xK989a3+1fiC1nro797urZ52JdzJvU1Krh6K8AsfEIfVqqmzSaNGee1jREL5HC3y +JpiN2h0/G0WX4s8mCOER5jOuu3vWa/qBV0qkwYBwgBV0n42DBE0/BHoQYXRNzpKV +unQ3wqqqln4XdyuQanfUEIGgZAgK0HYsO1/jG58yUhw8jw3VMjtqgzO9A0NA90RE +ZSimDD1RQuCJonDS2fZBewpDKgSNp9PqobkMEM60/uvrB7ZUGSAOkjnRLRbVGLyA +bHaMES0c6IP/FsRZUnJ9+0u7pp59Zvtqsn1pl46bN8s38EdZ/uyYN9P9C5cX/Tof +1fcPN6W45K1BVBvGhbRX6j/+J0CH0Ya9lGJmhVyb9My/YmusjjPRiEngyjcXN0lv +G9rrqFVwQLoGanF/YZE8VOPrC33NreocuF7ClK8Kkvwow71254inUYIw6VJVrwMW +97QYvDAJ8iRLh6fY2W1JaLNbJS89OcDfq8yaHhQlheT2moFONcx+IxrFQu/HEqGn +6DLCoYnNrHtVH2ZfRvE3T0dzJ/NtTr4QrQARAQABiQI8BBgBCAAmFiEEDlCufP2B +lZmc9FNwt2bpRBG5tzQFAmVt5AsCGwwFCQfSlgwACgkQt2bpRBG5tzQpWRAAirJZ +I1CQzk4+tYdFzPx9dWgvsO/J+y+tM3HJdeLKzBIeQGGJKvAmQC6RyMhhXwapznm8 +qS+KUj3/riBLuGRni1OJLABR0W+zilsVA9RMkAHdK8jGCCRjB7+HSAXKcN1k297j +mJwZstQuWlWOU315v2ebLSLW+SwBvHQVFnWRQZqu8oNm0uVWQBzZ0jAzoMXi/KDm +156L9CFowcHVwCHzkdgyQ6h5XxJfTwGlX5Kwed8SQD1eavRHiFstl6S4bqG9+xlB +YGFIKCMOSNO5DO+76NiqcgZb2huZ+9WHdcYB911j766uRHqPIAinWOARPYmtaLoT +kl96hi50EmtmFa0Roa5otbnw4TAXvlSsXXLqc30XoI3gKhnMLY846HVsU/PjCvgq +5WFDxT+fovLfd2IUfmSri8zGFdlAPSyRa1jPp9vtEeelKeMTTElRiz2e6LYMkhmF +YAfw4o1Gy+Io+Eu/Gu+CXfQuGRBU/sbIBwfe/mlk1e0NOO7u4sx5Q0QaRvl/XZ/P +oyjZstz48mfnFOTGbWRaX45rs/eBLYTggAFiNZXUrA0vH4iAxN1o+IsEzmnGFfRb +hVu9J6odTCsucHDKzqhPxbJdPP4rS9yKHwoYmilZq9NzwkX88raA7pWRX4tp6KPe +GK223m2Hg28/Y6p6JVrKZ/3w/Q8QFMkHFOeOSMs= +=oBQV +-----END PGP PUBLIC KEY BLOCK----- +``` From bd32d04a3a356d131318e2309fb0d9d294f8e8d5 Mon Sep 17 00:00:00 2001 From: Wil Wade Date: Tue, 5 Dec 2023 16:20:01 -0500 Subject: [PATCH 3/3] Ci Update: Use v2 runners and grcov (#1794) # Goal The goal of this PR is to get Code Coverage results reporting using the internal runners (faster, and more consistent results). 
Result: https://app.codecov.io/gh/LibertyDSNP/frequency/tree/ci-update%2Fcode-cov-grcov/ # Discussion - grcov reads the results a bit different than llvm-cov, but llvm cov was always running out of memory - Lots of exclusions to get better results, but most of them are the same as from llvm-cov --- .github/workflows/common/codecov/action.yml | 57 +++++++++++++++++---- .github/workflows/merge-pr.yml | 6 +-- .github/workflows/verify-pr-commit.yml | 6 +-- 3 files changed, 54 insertions(+), 15 deletions(-) diff --git a/.github/workflows/common/codecov/action.yml b/.github/workflows/common/codecov/action.yml index 0f978122dd..78ed8e2d50 100644 --- a/.github/workflows/common/codecov/action.yml +++ b/.github/workflows/common/codecov/action.yml @@ -1,20 +1,59 @@ name: Run Generation of Code Coverage -description: Runs cargo llvm-cov +description: Runs cargo grcov +inputs: + code-cov-token: + description: "codecov token" + required: true runs: using: "composite" steps: - - name: Install cargo-llvm-cov - uses: taiki-e/install-action@cargo-llvm-cov + - name: Install grcov + shell: bash + run: cargo +nightly-2023-07-13 install grcov + - name: Build + shell: bash # Limited to 12 threads max + run: cargo +nightly-2023-07-13 build -j 12 --features frequency-lint-check + env: + CARGO_INCREMENTAL: '0' + RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" + RUSTDOCFLAGS: "-Cpanic=abort" + - name: Test + shell: bash # Limited to 12 threads max + run: cargo +nightly-2023-07-13 test -j 12 --features frequency-lint-check + env: + CARGO_INCREMENTAL: '0' + RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" + RUSTDOCFLAGS: "-Cpanic=abort" + # There are a lot of things ignored here to make it all work + # See the grcov docs for more information + # excl rpc traits, and error enums + # Only one of excl start and stop are allowed. - name: Generate code coverage shell: bash run: | - cargo llvm-cov -v --no-fail-fast --workspace --lcov --output-path lcov.info \ - --ignore-filename-regex "^.*\/(node\/|runtime\/|mock\.rs|weights(\.rs)?|benchmarking\.rs|runtime-api/src/lib\.rs).*$" \ - --exclude "frequency,frequency-cli,frequency-runtime,frequency-service" \ - --features frequency-lint-check + grcov . -s . 
--binary-path ./target/debug/ -t lcov \ + --ignore-not-existing \ + --excl-start '(pub enum Error \{|#\[rpc\()' \ + --excl-stop '\s*}$' \ + --ignore "target/*" \ + --ignore "node/*" \ + --ignore "runtime/*" \ + --ignore "**/*weights.rs" \ + --ignore "**/benchmark*.rs" \ + --ignore "**/*tests.rs" \ + --ignore "**/tests/*.rs" \ + --ignore "**/*mock.rs" \ + --ignore "**/*runtime-api/src/lib.rs" \ + --ignore "*github.com*" \ + --ignore "*libcore*" \ + --ignore "*rustc*" \ + --ignore "*liballoc*" \ + --ignore "*cargo*" \ + -o ./target/debug/lcov.info - name: Upload to codecov.io - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4.0.0-beta.3 with: - files: lcov.info + token: ${{inputs.code-cov-token}} + files: ./target/debug/lcov.info fail_ci_if_error: false # optional (default = false) verbose: true # optional (default = false) diff --git a/.github/workflows/merge-pr.yml b/.github/workflows/merge-pr.yml index 85a8599bc5..fa88897c75 100644 --- a/.github/workflows/merge-pr.yml +++ b/.github/workflows/merge-pr.yml @@ -64,9 +64,7 @@ jobs: calc-code-coverage: name: Merge - Calculate Code Coverage - # This job currently fails on EKS runners and must be run on standalone until - # https://www.pivotaltracker.com/story/show/185045668 is resolved. - runs-on: [self-hosted, Linux, X64, build, v1] + runs-on: [self-hosted, Linux, X64, build, v2] container: ghcr.io/libertydsnp/frequency/ci-base-image:1.0.0 steps: - name: Check Out Repo @@ -74,6 +72,8 @@ jobs: - name: Generate and Upload Code Coverage id: codecov uses: ./.github/workflows/common/codecov + with: + code-cov-token: ${{ secrets.CODECOV_TOKEN }} publish-ci-base-image: needs: changes diff --git a/.github/workflows/verify-pr-commit.yml b/.github/workflows/verify-pr-commit.yml index 8f93a6e204..04ab64f376 100644 --- a/.github/workflows/verify-pr-commit.yml +++ b/.github/workflows/verify-pr-commit.yml @@ -263,9 +263,7 @@ jobs: needs: changes if: needs.changes.outputs.rust == 'true' name: Calculate Code Coverage - # This job currently fails on EKS runners and must be run on standalone until - # https://www.pivotaltracker.com/story/show/185045668 is resolved. - runs-on: [self-hosted, Linux, X64, build, v1] + runs-on: [self-hosted, Linux, X64, build, v2] container: ghcr.io/libertydsnp/frequency/ci-base-image:1.0.0 steps: - name: Check Out Repo @@ -273,6 +271,8 @@ jobs: - name: Generate and Upload Code Coverage id: codecov uses: ./.github/workflows/common/codecov + with: + code-cov-token: ${{ secrets.CODECOV_TOKEN }} # Workaround to handle skipped required check inside matrix # https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/troubleshooting-required-status-checks
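
For contributors who want to reproduce the new coverage job locally before pushing, here is a rough sketch assembled from the workflow steps above. It is an illustrative sketch only, not part of the patch: the pinned nightly toolchain, instrumentation flags, and ignore list are copied from the action and may drift from what CI actually runs, and the `-j 12` thread limit used on the runners is omitted since local machines vary.

```bash
# Sketch: approximate the CI coverage steps locally
# (assumes rustup with nightly-2023-07-13 and that grcov installs via cargo).
cargo +nightly-2023-07-13 install grcov

# Build and test with the same gcov-style instrumentation the action sets.
export CARGO_INCREMENTAL='0'
export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort"
export RUSTDOCFLAGS="-Cpanic=abort"
cargo +nightly-2023-07-13 build --features frequency-lint-check
cargo +nightly-2023-07-13 test --features frequency-lint-check

# Aggregate the profiles into an lcov report, mirroring (a subset of) the action's ignore list.
grcov . -s . --binary-path ./target/debug/ -t lcov \
  --ignore-not-existing \
  --ignore "target/*" --ignore "node/*" --ignore "runtime/*" \
  --ignore "**/*weights.rs" --ignore "**/benchmark*.rs" \
  -o ./target/debug/lcov.info
```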