Merge branch 'main' into A0-4268
Marcin-Radecki authored May 14, 2024
2 parents 37c2c08 + 92ad99a commit 1786736
Showing 20 changed files with 652 additions and 316 deletions.
3 changes: 3 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions Cargo.toml
@@ -127,6 +127,8 @@ sc-executor = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git
sc-keystore = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
sc-network = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
sc-network-common = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
sc-network-light = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
sc-network-transactions = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
sc-network-sync = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
sc-rpc = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
sc-rpc-api = { git = "https://github.com/Cardinal-Cryptography/polkadot-sdk.git", branch = "aleph-v1.5.0" }
82 changes: 36 additions & 46 deletions bin/node/src/service.rs
@@ -6,19 +6,19 @@ use std::{
};

use finality_aleph::{
run_validator_node, AlephBlockImport, AlephConfig, AllBlockMetrics, BlockImporter,
ChannelProvider, Justification, JustificationTranslator, MillisecsPerBlock, NetConfig,
RateLimiterConfig, RedirectingBlockImport, SessionPeriod, SubstrateChainStatus,
SyncNetworkService, SyncOracle, TracingBlockImport, ValidatorAddressCache,
build_network, run_validator_node, AlephBlockImport, AlephConfig, AllBlockMetrics,
BlockImporter, BuildNetworkOutput, ChannelProvider, Justification, JustificationTranslator,
MillisecsPerBlock, RateLimiterConfig, RedirectingBlockImport, SessionPeriod,
SubstrateChainStatus, SyncOracle, TracingBlockImport, ValidatorAddressCache,
};
use log::warn;
use primitives::{
fake_runtime_api::fake_runtime::RuntimeApi, AlephSessionApi, Block, DEFAULT_BACKUP_FOLDER,
MAX_BLOCK_SIZE,
};
use sc_basic_authorship::ProposerFactory;
use sc_client_api::{BlockBackend, HeaderBackend};
use sc_consensus::ImportQueue;
use sc_client_api::HeaderBackend;
use sc_consensus::{ImportQueue, Link};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_consensus_slots::BackoffAuthoringBlocksStrategy;
use sc_service::{error::Error as ServiceError, Configuration, TFullClient, TaskManager};
@@ -245,17 +245,9 @@ fn get_rate_limit_config(aleph_config: &AlephCli) -> RateLimiterConfig {
}
}

fn chain_prefix(config: &Configuration, client: &Arc<FullClient>) -> String {
let genesis_hash = client
.block_hash(0)
.ok()
.flatten()
.expect("we should have a hash");
match config.chain_spec.fork_id() {
Some(fork_id) => format!("/{genesis_hash}/{fork_id}"),
None => format!("/{genesis_hash}"),
}
}
struct NoopLink;

impl Link<Block> for NoopLink {}

/// Builds a new service for a full client.
pub fn new_authority(
@@ -272,7 +264,7 @@ pub fn new_authority(

let backoff_authoring_blocks = Some(LimitNonfinalized(aleph_config.max_nonfinalized_blocks()));
let prometheus_registry = config.prometheus_registry().cloned();
let (sync_oracle, _) = SyncOracle::new();
let (sync_oracle, major_sync) = SyncOracle::new();
let proposer_factory = get_proposer_factory(&service_components, &config);
let slot_duration = sc_consensus_aura::slot_duration(&*service_components.client)?;
let (block_import, block_rx) = RedirectingBlockImport::new(service_components.client.clone());
@@ -309,23 +301,25 @@

let import_queue_handle = BlockImporter::new(service_components.import_queue.service());

let NetConfig {
net_config,
let BuildNetworkOutput {
network,
authentication_network,
block_sync_network,
} = NetConfig::new(&config, &chain_prefix(&config, &service_components.client));
let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_network) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
net_config,
client: service_components.client.clone(),
transaction_pool: service_components.transaction_pool.clone(),
spawn_handle: service_components.task_manager.spawn_handle(),
import_queue: service_components.import_queue,
block_announce_validator_builder: None,
warp_sync_params: None,
block_relay: None,
})?;
sync_service,
tx_handler_controller,
system_rpc_tx,
} = build_network(
&config.network,
config.protocol_id(),
service_components.client.clone(),
major_sync,
service_components.transaction_pool.clone(),
&service_components.task_manager.spawn_handle(),
config
.prometheus_config
.as_ref()
.map(|config| config.registry.clone()),
)?;

let chain_status = SubstrateChainStatus::new(service_components.backend.clone())
.map_err(|e| ServiceError::Other(format!("failed to set up chain status: {e}")))?;
@@ -352,9 +346,15 @@
})
};

let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network: network.clone(),
sync_service: sync_network.clone(),
service_components.task_manager.spawn_handle().spawn(
"import-queue",
None,
service_components.import_queue.run(Box::new(NoopLink)),
);

sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network,
sync_service,
client: service_components.client.clone(),
keystore: service_components.keystore_container.local_keystore(),
task_manager: &mut service_components.task_manager,
@@ -374,14 +374,6 @@

let rate_limiter_config = get_rate_limit_config(&aleph_config);

// Network event stream needs to be created before starting the network,
// otherwise some events might be missed.
let sync_network_service = SyncNetworkService::new(
network,
sync_network,
vec![authentication_network.name(), block_sync_network.name()],
);

let AlephRuntimeVars {
millisecs_per_block,
session_period,
@@ -390,7 +382,6 @@
let aleph_config = AlephConfig {
authentication_network,
block_sync_network,
sync_network_service,
client: service_components.client,
chain_status,
import_queue_handle,
@@ -418,6 +409,5 @@
.spawn_essential_handle()
.spawn_blocking("aleph", None, run_validator_node(aleph_config));

network_starter.start_network();
Ok(service_components.task_manager)
}
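
Aside: the `NoopLink` introduced above works because, in `sc-consensus`, the `Link` trait's methods come with default no-op bodies, so an empty impl compiles and serves as a "discard every notification" sink for the import queue, which is now driven explicitly via `import_queue.run(Box::new(NoopLink))`. A minimal, self-contained toy of that pattern (the trait below is illustrative, not the real `sc-consensus::Link`):

trait ImportNotifications {
    // Default bodies are what make an empty impl legal.
    fn blocks_processed(&mut self, _imported: usize, _total: usize) {}
    fn justification_imported(&mut self, _success: bool) {}
}

struct NoopSink;

impl ImportNotifications for NoopSink {}

fn main() {
    let mut sink = NoopSink;
    // Both calls fall through to the default no-op bodies.
    sink.blocks_processed(1, 1);
    sink.justification_imported(true);
}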
3 changes: 3 additions & 0 deletions finality-aleph/Cargo.toml
@@ -54,6 +54,9 @@ sc-keystore = { workspace = true }
sc-network = { workspace = true }
sc-network-common = { workspace = true }
sc-network-sync = { workspace = true }
sc-network-light = { workspace = true }
sc-network-transactions = { workspace = true }
sc-rpc = { workspace = true }
sc-service = { workspace = true }
sc-telemetry = { workspace = true }
sc-transaction-pool = { workspace = true }
13 changes: 8 additions & 5 deletions finality-aleph/src/lib.rs
@@ -17,7 +17,8 @@ use parity_scale_codec::{Decode, Encode, Output};
use primitives as aleph_primitives;
use primitives::{AuthorityId, Block as AlephBlock, BlockHash, BlockNumber};
use sc_client_api::{
Backend, BlockBackend, BlockchainEvents, Finalizer, LockImportRun, StorageProvider,
Backend, BlockBackend, BlockchainEvents, Finalizer, LockImportRun, ProofProvider,
StorageProvider,
};
use sc_consensus::BlockImport;
use sc_keystore::LocalKeystore;
@@ -42,7 +43,6 @@ use crate::{

mod abft;
mod aggregation;
mod base_protocol;
mod block;
mod compatibility;
mod crypto;
@@ -73,7 +73,7 @@ pub use crate::{
metrics::{AllBlockMetrics, DefaultClock, FinalityRateMetrics, TimingBlockMetrics},
network::{
address_cache::{ValidatorAddressCache, ValidatorAddressingInfo},
NetConfig, ProtocolNetwork, SubstratePeerId, SyncNetworkService,
build_network, BuildNetworkOutput, ProtocolNetwork, SubstratePeerId,
},
nodes::run_validator_node,
session::SessionPeriod,
@@ -199,6 +199,8 @@ pub trait ClientForAleph<B, BE>:
+ BlockchainEvents<B>
+ BlockBackend<B>
+ StorageProvider<B, BE>
+ ProofProvider<B>
+ 'static
where
BE: Backend<B>,
B: Block,
@@ -217,7 +219,9 @@
+ BlockchainEvents<B>
+ BlockImport<B, Error = sp_consensus::Error>
+ BlockBackend<B>
+ StorageProvider<B, BE>,
+ StorageProvider<B, BE>
+ ProofProvider<B>
+ 'static,
{
}
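
Note: `ClientForAleph` is a trait alias built from a blanket impl, which is why the new `ProofProvider<B>` and `'static` bounds appear twice above, once on the trait definition and once on the blanket impl; the two bound lists are kept in sync so that every client meeting the bounds gets the impl for free. A small sketch of the pattern with illustrative names:

use std::fmt::Debug;

// Trait alias: the blanket impl below covers any type satisfying the bounds.
trait ClientLike: Clone + Debug + Send + 'static {}

impl<T> ClientLike for T where T: Clone + Debug + Send + 'static {}

fn use_client<C: ClientLike>(client: C) {
    println!("{client:?}");
}

fn main() {
    // u32 meets all the bounds, so it implements ClientLike automatically.
    use_client(42u32);
}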

@@ -258,7 +262,6 @@ pub struct RateLimiterConfig {
pub struct AlephConfig<C, SC, T> {
pub authentication_network: ProtocolNetwork,
pub block_sync_network: ProtocolNetwork,
pub sync_network_service: SyncNetworkService<AlephBlock>,
pub client: Arc<C>,
pub chain_status: SubstrateChainStatus,
pub import_queue_handle: BlockImporter,
2 changes: 1 addition & 1 deletion finality-aleph/src/metrics/chain_state.rs
@@ -85,7 +85,7 @@ impl ChainStateMetrics {
reorgs: register(
Histogram::with_opts(
HistogramOpts::new("aleph_reorgs", "Number of reorgs by length")
.buckets(vec![1., 2., 3., 5., 10.]),
.buckets(vec![1., 2., 4., 9.]),
)?,
&registry,
)?,
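
Note on the retuned buckets: with edges [1, 2, 4, 9], a reorg of depth 3 is counted in the le="4" bucket (Prometheus histograms are cumulative, so also in le="9" and +Inf), and anything deeper than 9 only in +Inf. A standalone sketch, assuming the `prometheus` crate that `substrate-prometheus-endpoint` re-exports and that the in-tree `register` helper wraps the same API:

use prometheus::{Histogram, HistogramOpts, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();
    let reorgs = Histogram::with_opts(
        HistogramOpts::new("aleph_reorgs", "Number of reorgs by length")
            .buckets(vec![1., 2., 4., 9.]),
    )?;
    registry.register(Box::new(reorgs.clone()))?;

    // A reorg of depth 3 lands in the le="4" bucket.
    reorgs.observe(3.0);
    assert_eq!(reorgs.get_sample_count(), 1);
    Ok(())
}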
File renamed without changes.
@@ -10,11 +10,6 @@ use sp_runtime::traits::{Block, Header, Saturating};

use crate::{BlockHash, BlockNumber};

#[derive(Clone, Debug)]
pub enum DisconnectError {
PeerWasNotConnected,
}

/// The role of the connected node.
#[derive(Clone, Copy, Debug)]
pub enum Role {
@@ -86,6 +81,22 @@
}
}

/// Problems when handling peer disconnecting.
#[derive(Clone, Debug)]
pub enum DisconnectError {
/// The peer was not connected as far as we know.
PeerWasNotConnected,
}

impl Display for DisconnectError {
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
use DisconnectError::*;
match self {
PeerWasNotConnected => write!(f, "peer was not connected"),
}
}
}

struct ConnectionLimits {
num_full_in_peers: usize,
num_full_out_peers: usize,
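
The Display impl added here is what lets callers interpolate the error directly, as the base protocol's disconnect handling further down in this commit does with `warn!(..., "Problem removing disconnecting peer: {e}.")`. A self-contained version of the same pairing, with `eprintln!` standing in for the log macro:

use std::fmt::{Display, Formatter, Result as FmtResult};

#[derive(Clone, Debug)]
enum DisconnectError {
    PeerWasNotConnected,
}

impl Display for DisconnectError {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        match self {
            DisconnectError::PeerWasNotConnected => write!(f, "peer was not connected"),
        }
    }
}

fn main() {
    let e = DisconnectError::PeerWasNotConnected;
    eprintln!("Problem removing disconnecting peer: {e}.");
}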
@@ -1,5 +1,3 @@
//TODO(A0-3750): This code should be used.
#![allow(dead_code)]
mod config;
mod handler;
mod service;
@@ -20,7 +20,7 @@ use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
use sp_runtime::traits::{Block, Header};

use crate::{
base_protocol::{handler::Handler, LOG_TARGET},
network::base_protocol::{handler::Handler, LOG_TARGET},
BlockHash, BlockNumber,
};

@@ -70,7 +70,7 @@
protocol_names: Vec<ProtocolName>,
network: Arc<NetworkService<B, BlockHash>>,
events_from_network: Box<dyn NotificationService>,
) -> (Self, SyncingService<B>) {
) -> (Self, Arc<SyncingService<B>>) {
let (commands_for_service, commands_from_user) =
tracing_unbounded("mpsc_base_protocol", 100_000);
(
@@ -81,12 +81,12 @@
commands_from_user,
events_from_network,
},
SyncingService::new(
Arc::new(SyncingService::new(
commands_for_service,
// We don't care about this one, so a dummy value.
Arc::new(AtomicUsize::new(0)),
major_sync,
),
)),
)
}

@@ -167,6 +167,9 @@
},
NotificationStreamClosed { peer } => {
trace!(target: LOG_TARGET, "Disconnect event for peer {:?}", peer);
if let Err(e) = self.handler.on_peer_disconnect(peer) {
warn!(target: LOG_TARGET, "Problem removing disconnecting peer: {e}.");
}
let addresses: Vec<_> = iter::once(peer).collect();
for name in &self.protocol_names {
if let Err(e) = self
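
One more small pattern from the hunks above: the base protocol service constructor now returns the syncing handle already wrapped in `Arc`, since the handle is shared with downstream consumers rather than owned by one of them (the `sync_service` handed to `sc_service::spawn_tasks` earlier in this commit is presumably this same handle). A toy sketch with illustrative types:

use std::sync::Arc;

struct SyncingHandle {
    is_major_syncing: bool,
}

struct BaseProtocol;

impl BaseProtocol {
    // Returning Arc directly saves every caller from re-wrapping the handle.
    fn new() -> (Self, Arc<SyncingHandle>) {
        (BaseProtocol, Arc::new(SyncingHandle { is_major_syncing: false }))
    }
}

fn main() {
    let (_service, handle) = BaseProtocol::new();
    let for_spawned_tasks = Arc::clone(&handle);
    assert_eq!(for_spawned_tasks.is_major_syncing, handle.is_major_syncing);
}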