chore: upgrade to polkadot 2409 #965

Draft · wants to merge 22 commits into master

4,116 changes: 1,949 additions & 2,167 deletions Cargo.lock

Large diffs are not rendered by default.

1,000 changes: 500 additions & 500 deletions Cargo.toml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion Makefile
@@ -54,7 +54,7 @@ format:
.PHONY: try-runtime
try-runtime:
$(cargo) build --release --features try-runtime
try-runtime --runtime ./target/release/wbuild/hydradx-runtime/hydradx_runtime.wasm on-runtime-upgrade --blocktime 12000 --checks all live --uri wss://archive.rpc.hydration.cloud
try-runtime --runtime ./target/release/wbuild/hydradx-runtime/hydradx_runtime.wasm on-runtime-upgrade --checks all live --uri wss://archive.rpc.hydration.cloud

.PHONY: build-docs
build-docs:
Binary file modified integration-tests/evm-snapshot/SNAPSHOT
Binary file not shown.
44 changes: 15 additions & 29 deletions integration-tests/src/contracts.rs
@@ -1,37 +1,22 @@
use crate::evm::dai_ethereum_address;
use crate::polkadot_test_net::Hydra;
use crate::polkadot_test_net::TestNet;
use crate::polkadot_test_net::ALICE;
use crate::polkadot_test_net::BOB;
use crate::polkadot_test_net::UNITS;
use crate::polkadot_test_net::WETH;
use crate::utils::contracts::deploy_contract;
use crate::utils::contracts::deploy_contract_code;
use crate::utils::contracts::get_contract_bytecode;
use fp_evm::ExitReason::Succeed;
use fp_evm::ExitSucceed::Stopped;
use fp_evm::FeeCalculator;
use crate::polkadot_test_net::{Hydra, TestNet, ALICE, BOB, UNITS, WETH};
use crate::utils::contracts::{deploy_contract, deploy_contract_code, get_contract_bytecode};
use fp_evm::{ExitReason::Succeed, ExitSucceed::Stopped, FeeCalculator};
use frame_support::assert_ok;
use hex_literal::hex;
use hydradx_runtime::evm::precompiles::handle::EvmDataWriter;
use hydradx_runtime::evm::precompiles::Bytes;
use hydradx_runtime::evm::Executor;
use hydradx_runtime::AccountId;
use hydradx_runtime::EVMAccounts;
use hydradx_runtime::Runtime;
use hydradx_runtime::RuntimeEvent;
use hydradx_runtime::System;
use hydradx_traits::evm::CallContext;
use hydradx_traits::evm::EvmAddress;
use hydradx_traits::evm::InspectEvmAccounts;
use hydradx_traits::evm::EVM;
use hydradx_runtime::{
evm::{
precompiles::{handle::EvmDataWriter, Bytes},
Executor,
},
AccountId, EVMAccounts, Runtime, RuntimeEvent, System,
};
use hydradx_traits::evm::{CallContext, EvmAddress, InspectEvmAccounts, EVM};
use num_enum::{IntoPrimitive, TryFromPrimitive};
use pretty_assertions::assert_eq;
use sp_core::H256;
use sp_core::{RuntimeDebug, U256};
use sp_core::{RuntimeDebug, H256, U256};
use test_utils::expect_events;
use xcm_emulator::Network;
use xcm_emulator::TestExt;
use xcm_emulator::{Network, TestExt};

pub fn deployer() -> EvmAddress {
EVMAccounts::evm_address(&Into::<AccountId>::into(ALICE))
@@ -107,7 +92,7 @@ fn contract_check_fails_on_precompile_without_code() {
TestNet::reset();
Hydra::execute_with(|| {
let checker = deploy_contract("ContractCheck", deployer());
pallet_evm::AccountCodes::<Runtime>::remove(dai_ethereum_address());
pallet_evm::Pallet::<Runtime>::remove_account(&dai_ethereum_address());
assert_eq!(is_contract(checker, dai_ethereum_address()), false);
});
}
@@ -117,6 +102,7 @@ fn contract_check_succeeds_on_precompile_with_invalid_code() {
TestNet::reset();
Hydra::execute_with(|| {
let checker = deploy_contract("ContractCheck", deployer());
// The code is invalid, but we intentionally set account codes of registered assets to 0.
pallet_evm::AccountCodes::<Runtime>::insert(dai_ethereum_address(), &hex!["00"][..]);
assert_eq!(is_contract(checker, dai_ethereum_address()), true);
});
2 changes: 1 addition & 1 deletion integration-tests/src/cross_chain_transfer.rs
@@ -110,7 +110,7 @@ fn rococo_should_receive_asset_when_sent_from_hydra() {
Rococo::execute_with(|| {
assert_eq!(
hydradx_runtime::Balances::free_balance(AccountId::from(BOB)),
2_999_989_698_923 // 3 * HDX - fee
2_999_989_440_633 // 3 * HDX - fee
);
});
}
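As a rough sanity check of the updated constant (assuming the 12-decimal `UNITS = 1_000_000_000_000` denomination used elsewhere in these integration tests), the fee implied by the "3 * HDX - fee" comment can be recomputed like this:

fn main() {
    // Illustrative only: UNITS is assumed to be 10^12, matching the HDX denomination
    // used throughout the integration tests.
    const UNITS: u128 = 1_000_000_000_000;
    let expected_balance: u128 = 2_999_989_440_633;
    let implied_fee = 3 * UNITS - expected_balance;
    println!("implied fee: {implied_fee}"); // 10_559_367
}
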
1 change: 1 addition & 0 deletions integration-tests/src/liquidation.rs
@@ -24,6 +24,7 @@ use orml_traits::currency::MultiCurrency;
use sp_core::{H256, U256};
use sp_runtime::{traits::CheckedConversion, SaturatedConversion};

// ./target/release/scraper save-storage --pallet EVM AssetRegistry Timestamp Omnipool Tokens --uri wss://rpc.nice.hydration.cloud:443
const PATH_TO_SNAPSHOT: &str = "evm-snapshot/SNAPSHOT";

#[module_evm_utility_macro::generate_function_selector]
34 changes: 29 additions & 5 deletions integration-tests/src/polkadot_test_net.rs
@@ -2,7 +2,7 @@
use frame_support::{
assert_ok,
sp_runtime::{
traits::{AccountIdConversion, Block as BlockT, Dispatchable, HashingFor},
traits::{AccountIdConversion, Block as BlockT, Dispatchable},
BuildStorage, FixedU128, Permill,
},
traits::{GetCallMetadata, OnInitialize},
@@ -18,7 +18,7 @@ use hex_literal::hex;
use hydradx_runtime::{evm::WETH_ASSET_LOCATION, Referrals, RuntimeOrigin};
pub use hydradx_traits::{evm::InspectEvmAccounts, registry::Mutate};
use pallet_referrals::{FeeDistribution, Level};
pub use polkadot_primitives::v7::{BlockNumber, MAX_CODE_SIZE, MAX_POV_SIZE};
pub use polkadot_primitives::v8::{BlockNumber, MAX_CODE_SIZE, MAX_POV_SIZE};
use polkadot_runtime_parachains::configuration::HostConfiguration;
use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId;
use sp_core::storage::Storage;
@@ -282,10 +282,9 @@ pub mod rococo {
}
}

use sp_core::{Pair, Public};
use sp_core::{sr25519, Pair, Public};

use polkadot_primitives::{AssignmentId, ValidatorId};
use polkadot_service::chain_spec::get_authority_keys_from_seed_no_beefy;
use sc_consensus_grandpa::AuthorityId as GrandpaId;
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
use sp_consensus_babe::AuthorityId as BabeId;
@@ -340,6 +339,28 @@ pub mod rococo {
}
}

pub fn get_authority_keys_from_seed_no_beefy(
seed: &str,
) -> (
AccountId,
AccountId,
BabeId,
GrandpaId,
ValidatorId,
AssignmentId,
AuthorityDiscoveryId,
) {
(
get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)),
get_account_id_from_seed::<sr25519::Public>(seed),
get_from_seed::<BabeId>(seed),
get_from_seed::<GrandpaId>(seed),
get_from_seed::<ValidatorId>(seed),
get_from_seed::<AssignmentId>(seed),
get_from_seed::<AuthorityDiscoveryId>(seed),
)
}

pub fn genesis() -> Storage {
let genesis_config = rococo_runtime::RuntimeGenesisConfig {
balances: rococo_runtime::BalancesConfig {
@@ -366,6 +387,7 @@
)
})
.collect::<Vec<_>>(),
non_authority_keys: Default::default(),
},
configuration: rococo_runtime::ConfigurationConfig {
config: get_host_configuration(),
@@ -468,6 +490,7 @@ pub mod hydra {
)
})
.collect(),
non_authority_keys: Default::default(),
},
asset_registry: hydradx_runtime::AssetRegistryConfig {
registered_assets: vec![
@@ -632,6 +655,7 @@ pub mod para {
)
})
.collect(),
non_authority_keys: Default::default(),
},
parachain_info: hydradx_runtime::ParachainInfoConfig {
parachain_id: para_id.into(),
@@ -765,7 +789,7 @@ pub fn rococo_run_to_block(to: BlockNumber) {

pub fn hydra_live_ext(
path_to_snapshot: &str,
) -> frame_remote_externalities::RemoteExternalities<HashingFor<hydradx_runtime::Block>> {
) -> frame_remote_externalities::RemoteExternalities<hydradx_runtime::Block> {
let ext = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
2 changes: 1 addition & 1 deletion math/src/ema/math.rs
@@ -109,7 +109,7 @@ pub fn exp_smoothing(smoothing: Fraction, iterations: u32) -> Fraction {
/// Possible alternatives for `alpha = 2 / (period + 1)`:
/// + `alpha = 1 - 0.5^(1 / period)` for a half-life of `period` or
/// + `alpha = 1 - 0.5^(2 / period)` to have the same median as a `period`-length SMA.
/// See https://en.wikipedia.org/wiki/Moving_average#Relationship_between_SMA_and_EMA
/// See https://en.wikipedia.org/wiki/Moving_average#Relationship_between_SMA_and_EMA
///
/// Note: Not used in the pallet except to check configured values. Not meant to be used by code
/// interacting with the pallet. Use the configured values.
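The doc comment above lists alternatives to the default `alpha = 2 / (period + 1)`; a minimal floating-point sketch of how the three choices compare for a given period (illustrative only — the pallet itself works with the fixed-point `Fraction` type):

fn main() {
    let period: f64 = 9.0;

    // Default smoothing factor: alpha = 2 / (period + 1).
    let default_alpha = 2.0 / (period + 1.0);
    // Variant with a half-life of `period`: alpha = 1 - 0.5^(1 / period).
    let half_life_alpha = 1.0 - 0.5_f64.powf(1.0 / period);
    // Variant with the same median as a `period`-length SMA: alpha = 1 - 0.5^(2 / period).
    let sma_median_alpha = 1.0 - 0.5_f64.powf(2.0 / period);

    println!("default:    {default_alpha:.4}");
    println!("half-life:  {half_life_alpha:.4}");
    println!("SMA median: {sma_median_alpha:.4}");
}
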
3 changes: 2 additions & 1 deletion math/src/ema/tests/high_precision.rs
@@ -160,7 +160,8 @@ fn precise_price_ema_works() {
let smoothing = fraction::frac(1, 4);
let expected = ((Rational::from(history[0]) * 3 / 4 + Rational::from(history[1]) / 4) * 3 / 4
+ Rational::from(history[2]) / 4)
* 3 / 4 + Rational::from(history[3]) / 4;
* 3 / 4
+ Rational::from(history[3]) / 4;
let naive_ema = naive_precise_price_ema(history.clone(), fraction_to_high_precision(smoothing));
assert_eq!(expected, naive_ema);
let history = history.into_iter().map(|p| (p, 1)).collect();
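The reformatted expression in the hunk above is the EMA recurrence unrolled over four samples with `smoothing = 1/4`; a self-contained sketch of the same fold (using plain `f64` instead of the crate's `Rational` type) looks like this:

// EMA recurrence: ema_next = ema_prev * (1 - alpha) + price * alpha.
fn naive_ema(history: &[f64], alpha: f64) -> f64 {
    history[1..]
        .iter()
        .fold(history[0], |ema, &price| ema * (1.0 - alpha) + price * alpha)
}

fn main() {
    let history = [1.0, 2.0, 3.0, 4.0];
    // With alpha = 1/4 this equals ((p0*3/4 + p1/4)*3/4 + p2/4)*3/4 + p3/4,
    // mirroring the `expected` value computed in the test.
    println!("{}", naive_ema(&history, 0.25));
}
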
2 changes: 1 addition & 1 deletion math/src/liquidity_mining/tests.rs
@@ -917,7 +917,7 @@ fn calculate_global_farm_rewards_should_work() {
(
FixedU128::from(1), //max value for yield_per_period
100_000_000_000_000_000_000_000_000_000_u128,
u128::max_value() / 1_000_000,
u128::MAX / 1_000_000,
FixedU128::from(3),
1_000_000,
300_000_000_000_000_000_000_000_000_000_000_000_u128,
6 changes: 2 additions & 4 deletions node/src/chain_spec/mod.rs
@@ -28,8 +28,7 @@ pub mod staging;
use cumulus_primitives_core::ParaId;
use hex_literal::hex;
use hydradx_runtime::{
pallet_claims::EthereumAddress, AccountId, AuraId, Balance, DusterConfig, RegistryStrLimit, RuntimeGenesisConfig,
Signature, WASM_BINARY,
pallet_claims::EthereumAddress, AccountId, AuraId, Balance, DusterConfig, RegistryStrLimit, Signature, WASM_BINARY,
};
use primitives::{
constants::currency::{NATIVE_EXISTENTIAL_DEPOSIT, UNITS},
@@ -53,7 +52,6 @@ const STASH: Balance = 100 * UNITS;

/// The extensions for the [`ChainSpec`].
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, ChainSpecExtension, ChainSpecGroup)]
#[serde(deny_unknown_fields)]
pub struct Extensions {
/// The relay chain of the Parachain.
pub relay_chain: String,
@@ -71,7 +69,7 @@ impl Extensions {
}

/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type.
pub type ChainSpec = sc_service::GenericChainSpec<RuntimeGenesisConfig, Extensions>;
pub type ChainSpec = sc_service::GenericChainSpec<Extensions>;

/// Generate a crypto pair from seed.
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
12 changes: 3 additions & 9 deletions node/src/command.rs
@@ -291,7 +291,7 @@ pub fn run() -> sc_cli::Result<()> {
let id = ParaId::from(para_id);

let parachain_account =
AccountIdConversion::<polkadot_primitives::v7::AccountId>::into_account_truncating(&id);
AccountIdConversion::<polkadot_primitives::v8::AccountId>::into_account_truncating(&id);

let state_version = Cli::runtime_version().state_version();

@@ -368,15 +368,9 @@ impl CliConfiguration<Self> for RelayChainCli {
self.base.base.prometheus_config(default_listen_port, chain_spec)
}

fn init<F>(
&self,
_support_url: &String,
_impl_version: &String,
_logger_hook: F,
_config: &sc_service::Configuration,
) -> Result<()>
fn init<F>(&self, _support_url: &String, _impl_version: &String, _logger_hook: F) -> Result<()>
where
F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration),
F: FnOnce(&mut sc_cli::LoggerBuilder),
{
unreachable!("PolkadotCli is never initialized; qed");
}
14 changes: 3 additions & 11 deletions node/src/rpc.rs
@@ -36,7 +36,6 @@ use sc_client_api::{
use sc_network::service::traits::NetworkService;
use sc_network_sync::SyncingService;
use sc_rpc::SubscriptionTaskExecutor;
pub use sc_rpc_api::DenyUnsafe;
use sc_transaction_pool::{ChainApi, Pool};
use sc_transaction_pool_api::TransactionPool;
use sp_api::{CallApiAt, ProvideRuntimeApi};
@@ -61,8 +60,6 @@ pub struct FullDeps<C, P, B> {
pub client: Arc<C>,
/// Transaction pool instance.
pub pool: Arc<P>,
/// Whether to deny unsafe calls
pub deny_unsafe: DenyUnsafe,
/// Backend used by the node.
pub backend: Arc<B>,
}
@@ -125,16 +122,11 @@
use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer};

let mut module = RpcExtension::new(());
let FullDeps {
client,
pool,
deny_unsafe,
backend,
} = deps;
let FullDeps { client, pool, backend } = deps;

module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
module.merge(System::new(client.clone(), pool).into_rpc())?;
module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
module.merge(StateMigration::new(client, backend, deny_unsafe).into_rpc())?;
module.merge(StateMigration::new(client, backend).into_rpc())?;

Ok(module)
}
19 changes: 9 additions & 10 deletions node/src/service.rs
@@ -42,7 +42,6 @@ use sc_client_api::Backend;
use sc_consensus::ImportQueue;
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use sc_network::NetworkBlock;
use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
@@ -102,17 +101,18 @@ pub fn new_partial(
.transpose()?;

let heap_pages = config
.executor
.default_heap_pages
.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
extra_pages: h as _,
});

let executor = WasmExecutor::builder()
.with_execution_method(config.wasm_method)
.with_execution_method(config.executor.wasm_method)
.with_onchain_heap_alloc_strategy(heap_pages)
.with_offchain_heap_alloc_strategy(heap_pages)
.with_max_runtime_instances(config.max_runtime_instances)
.with_runtime_cache_size(config.runtime_cache_size)
.with_max_runtime_instances(config.executor.max_runtime_instances)
.with_runtime_cache_size(config.executor.runtime_cache_size)
.build();

let (client, backend, keystore_container, task_manager) =
@@ -202,8 +202,11 @@ async fn start_node_impl(
let params = new_partial(&parachain_config)?;
let (block_import, mut telemetry, telemetry_worker_handle, frontier_backend, filter_pool, fee_history_cache) =
params.other;

let prometheus_registry = parachain_config.prometheus_registry().cloned();
let net_config = sc_network::config::FullNetworkConfiguration::<_, _, sc_network::NetworkWorker<Block, Hash>>::new(
&parachain_config.network,
prometheus_registry.clone(),
);

let client = params.client.clone();
@@ -294,11 +297,10 @@ async fn start_node_impl(
let pubsub_notification_sinks = pubsub_notification_sinks.clone();
let backend = backend.clone();

Box::new(move |deny_unsafe, subscription_task_executor| {
Box::new(move |subscription_task_executor| {
let deps = crate::rpc::FullDeps {
client: client.clone(),
pool: transaction_pool.clone(),
deny_unsafe,
backend: backend.clone(),
};

@@ -396,7 +398,6 @@ async fn start_node_impl(
&task_manager,
relay_chain_interface.clone(),
transaction_pool,
sync_service.clone(),
params.keystore_container.keystore(),
relay_chain_slot_duration,
para_id,
@@ -448,7 +449,6 @@ fn start_consensus(
task_manager: &TaskManager,
relay_chain_interface: Arc<dyn RelayChainInterface>,
transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient>>,
sync_oracle: Arc<SyncingService<Block>>,
keystore: KeystorePtr,
relay_chain_slot_duration: Duration,
para_id: ParaId,
@@ -483,7 +483,6 @@ fn start_consensus(
block_import,
para_client: client,
relay_client: relay_chain_interface,
sync_oracle,
keystore,
collator_key,
para_id,
@@ -496,7 +495,7 @@
collation_request_receiver: None,
};

let fut = basic_aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _>(params);
let fut = basic_aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _>(params);
task_manager.spawn_essential_handle().spawn("aura", None, fut);

Ok(())