test(storage provider pallet): add setup for multiple partitions #194

Merged · 17 commits · Aug 14, 2024
Changes from 15 commits
50 changes: 28 additions & 22 deletions pallets/storage-provider/src/deadline.rs
@@ -82,11 +82,8 @@ where
{
/// Construct a new [`Deadline`] instance.
pub fn new() -> Self {
let mut partitions = BoundedBTreeMap::new();
// create 1 initial partition because deadlines are tied to partition so at least 1 partition should be initialized.
let _ = partitions.try_insert(0, Partition::new());
Self {
partitions,
partitions: BoundedBTreeMap::new(),
expirations_blocks: BoundedBTreeMap::new(),
partitions_posted: BoundedBTreeSet::new(),
early_terminations: BoundedBTreeSet::new(),
@@ -148,31 +145,40 @@ where
let mut partitions = core::mem::take(&mut self.partitions).into_inner();
let initial_partitions = partitions.len();

// Needs to start at 1 because if we take the length of `self.partitions`
// it will always be `MAX_PARTITIONS_PER_DEADLINE` because the partitions are pre-initialized.
let mut partition_idx = 0;
// We can always start at the last partition. That is because we know
// that partitions before the last one are full. We achieve that by
// filling a new partition only when the current one is full.
let mut partition_idx = initial_partitions.saturating_sub(1);
loop {
// Get/create partition to update.
// Get the partition to which we want to add sectors. If the
// partition does not exist, create a new one. The new partition is
// created when it's our first time adding sectors to it.
let partition = partitions
.entry(partition_idx as u32)
.or_insert(Partition::new());

// Figure out which (if any) sectors we want to add to this partition.
// Get the current partition's sector count. If the current
// partition is full, create a new one and start filling that one.
let sector_count = partition.sectors.len() as u64;
if sector_count >= partition_size {
partition_idx += 1;
continue;
}

// Calculate how many sectors we can add to current partition.
let size = cmp::min(partition_size - sector_count, sectors.len() as u64) as usize;

// Split the sectors into two parts: one to add to the current
// partition and the rest which will be added to the next one.
let (partition_new_sectors, sectors) = sectors.split_at(size);

// Extract the sector numbers from the new sectors.
let new_partition_sectors: Vec<SectorNumber> = partition_new_sectors
.into_iter()
.map(|sector| sector.sector_number)
.collect();

// Add sectors to partition.
// Add new sector numbers to the current partition.
partition
.add_sectors(&new_partition_sectors)
.map_err(|_| DeadlineError::CouldNotAddSectors)?;
@@ -184,10 +190,10 @@ where
.map(|s| (s.expiration, partition_idx as PartitionNumber)),
);

// No more sectors to add
if sectors.is_empty() {
break;
}
partition_idx += 1;
}

let partitions = BoundedBTreeMap::try_from(partitions).map_err(|_| {
@@ -222,20 +228,20 @@ where
partition_sectors: &mut PartitionMap,
fault_expiration_block: BlockNumber,
) -> Result<(), DeadlineError> {
for (partition_number, partition) in self.partitions.iter_mut() {
if !partition_sectors.0.contains_key(&partition_number) {
return Err(DeadlineError::PartitionNotFound);
}
for (partition_number, faulty_sectors) in partition_sectors.0.iter() {
let partition = self
.partitions
.get_mut(partition_number)
.ok_or(DeadlineError::PartitionNotFound)?;

partition.record_faults(
sectors,
partition_sectors
.0
.get(partition_number)
.expect("Infallible because of the above check"),
faulty_sectors,
).map_err(|e| {
log::error!(target: LOG_TARGET, "record_faults: Error while recording faults in a partition: {e:?}");
DeadlineError::PartitionError(e)
})?;

// Update expiration block
if let Some((&block, _)) = self
.expirations_blocks
@@ -244,9 +250,9 @@
{
self.expirations_blocks.remove(&block);
self.expirations_blocks.try_insert(fault_expiration_block, *partition_number).map_err(|_| {
log::error!(target: LOG_TARGET, "record_faults: Could not insert new expiration");
DeadlineError::FailedToUpdateFaultExpiration
})?;
log::error!(target: LOG_TARGET, "record_faults: Could not insert new expiration");
DeadlineError::FailedToUpdateFaultExpiration
})?;
} else {
log::debug!(target: LOG_TARGET, "record_faults: Inserting partition number {partition_number}");
self.expirations_blocks.try_insert(fault_expiration_block, *partition_number).map_err(|_| {
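
The comments added in the loop above describe a fill-then-spill strategy: keep topping up the last existing partition and only move on to a new one once the current one is full. Below is a minimal, self-contained sketch of that strategy using plain std collections; the `Sector` type, the `partition_size` handling, and the error-free signature are simplifications for illustration, not the pallet's actual bounded types.

```rust
use std::collections::BTreeMap;

/// Simplified stand-in for the pallet's sector metadata.
struct Sector {
    sector_number: u64,
}

/// Fill the last existing partition first, opening a new partition only
/// once the current one is full (assumes `partition_size > 0`).
fn assign_sectors(
    partitions: &mut BTreeMap<u32, Vec<u64>>,
    partition_size: usize,
    mut sectors: &[Sector],
) {
    // Partitions before the last one are always full, so start at the last.
    let mut partition_idx = partitions.len().saturating_sub(1) as u32;
    loop {
        // Create the partition lazily the first time sectors are added to it.
        let partition = partitions.entry(partition_idx).or_default();

        // If the current partition is full, move on to the next index.
        if partition.len() >= partition_size {
            partition_idx += 1;
            continue;
        }

        // Take as many sectors as still fit into the current partition.
        let size = (partition_size - partition.len()).min(sectors.len());
        let (now, rest) = sectors.split_at(size);
        partition.extend(now.iter().map(|s| s.sector_number));
        sectors = rest;

        // Stop once every sector has been placed.
        if sectors.is_empty() {
            break;
        }
        partition_idx += 1;
    }
}
```

With `partition_size = 2` and three sectors, this sketch yields partition 0 with two sectors and partition 1 with one, which is the kind of multi-partition layout the new test setup exercises.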
10 changes: 10 additions & 0 deletions pallets/storage-provider/src/lib.rs
@@ -485,21 +485,29 @@ pub mod pallet {
Error::<T>::InvalidProofType,
);

// Sector that will be activated and must be periodically
// proven
let new_sector =
SectorOnChainInfo::from_pre_commit(precommit.info.clone(), current_block);

// Mutate the storage provider state
StorageProviders::<T>::try_mutate(&owner, |maybe_sp| -> DispatchResult {
let sp = maybe_sp
.as_mut()
.ok_or(Error::<T>::StorageProviderNotFound)?;

// Activate the sector
sp.activate_sector(sector_number, new_sector.clone())
.map_err(|e| Error::<T>::StorageProviderError(e))?;

// Sectors which will be assigned to the deadlines
let mut new_sectors = BoundedVec::new();
new_sectors
.try_push(new_sector)
.expect("Infallible since only 1 element is inserted");

// Assign sectors to deadlines, which specify when the sectors need
// to be proven
sp.assign_sectors_to_deadlines(
current_block,
new_sectors,
@@ -513,6 +521,7 @@
)
.map_err(|e| Error::<T>::StorageProviderError(e))?;

// Remove sector from the pre-committed map
sp.remove_pre_committed_sector(sector_number)
.map_err(|e| Error::<T>::StorageProviderError(e))?;
Ok(())
@@ -725,6 +734,7 @@ pub mod pallet {
owner,
recoveries: params.recoveries,
});

Ok(())
}
}
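
The new comments in this hunk spell out the prove-commit ordering: build the on-chain sector info from the pre-commit, activate it, assign it to a deadline that dictates when it must be proven, and finally remove the pre-commit entry. A condensed sketch of that ordering on a hypothetical, simplified provider state; the names only mirror the calls in the diff, while the real extrinsic works through `StorageProviders::try_mutate` and bounded types.

```rust
use std::collections::BTreeMap;

#[derive(Clone)]
struct SectorOnChainInfo {
    sector_number: u64,
}

#[derive(Default)]
struct ProviderState {
    pre_committed: BTreeMap<u64, SectorOnChainInfo>,
    active: BTreeMap<u64, SectorOnChainInfo>,
}

impl ProviderState {
    /// Happy path of prove-commit: activate, assign to a deadline, then
    /// remove the pre-commit (deadline assignment is stubbed out here).
    fn prove_commit(&mut self, sector_number: u64) -> Result<(), &'static str> {
        // The sector must have been pre-committed earlier.
        let new_sector = self
            .pre_committed
            .get(&sector_number)
            .cloned()
            .ok_or("sector not pre-committed")?;

        // Activate the sector so it becomes part of the provable state.
        self.active.insert(sector_number, new_sector);

        // Assign the sector to a deadline that dictates when it must be
        // proven (omitted in this sketch).

        // Finally, drop it from the pre-committed map.
        self.pre_committed.remove(&sector_number);
        Ok(())
    }
}
```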
35 changes: 25 additions & 10 deletions pallets/storage-provider/src/sector_map.rs
@@ -57,11 +57,16 @@ impl PartitionMap {
/// * If the partition did not exist, a new set of sectors will be created.
/// * If the bounds are broken (partitions or sectors), the operation _IS NOT_ a no-op
/// and returns an error.
/// * If no sectors are passed to be inserted, the operation returns an error and no changes are made.
pub fn try_insert_sectors(
&mut self,
partition: PartitionNumber,
sectors: BoundedBTreeSet<SectorNumber, ConstU32<MAX_TERMINATIONS_PER_CALL>>,
) -> Result<(), SectorMapError> {
if sectors.is_empty() {
return Err(SectorMapError::EmptySectors);
}

if let Some(s) = self.0.get_mut(&partition) {
// NOTE(@jmg-duarte,24/07/2024): to make the operation a no-op we need to merge both
// sets into a single one and replace the original one if the bounds weren't broken
@@ -147,12 +152,11 @@ impl DeadlineSectorMap {
None => {
// create new partition map entry
let mut p_map = PartitionMap::new();
p_map
.try_insert_sectors(partition, sectors)
.map_err(|_| SectorMapError::FailedToInsertSector)?;
p_map.try_insert_sectors(partition, sectors)?;

self.0
.try_insert(deadline_index, p_map)
.map_err(|_| SectorMapError::FailedToInsertSector)?;
.map_err(|_| SectorMapError::FailedToInsertPartition)?;
Ok(())
}
}
@@ -163,6 +167,10 @@
pub enum SectorMapError {
/// Emitted when trying to insert sector(s) fails.
FailedToInsertSector,
/// Emitted when trying to insert a partition fails.
FailedToInsertPartition,
/// Emitted when trying to insert an empty set of sectors.
EmptySectors,
}

#[cfg(test)]
@@ -174,6 +182,7 @@ mod test {
use sp_core::bounded_btree_map;

use super::*;
use crate::tests::create_set;

#[test]
fn partition_map_add_sectors() {
@@ -212,6 +221,18 @@
expect_sectors_exact(&map, partition, &sectors);
}

#[test]
fn partition_map_fail_empty_sectors() {
let mut map = PartitionMap::new();

let partition = 0;
let sectors = [];

assert!(map
.try_insert_sectors(partition, create_set(&sectors))
.is_err());
}

#[test]
fn partition_map_fail_large_input() {
let partition = 0;
@@ -278,12 +299,6 @@
expect_deadline_sectors_exact(&map, deadline, partition, &sectors);
}

/// This is a helper function to easily create a set of sectors.
fn create_set<const T: u32>(sectors: &[u64]) -> BoundedBTreeSet<SectorNumber, ConstU32<T>> {
let sectors = sectors.iter().copied().collect::<BTreeSet<_>>();
BoundedBTreeSet::try_from(sectors).unwrap()
}

/// Checks that items in `expected_sectors` are in the actual partition. Any
/// extra items that are not in the `expected_sectors` are ignored.
fn expect_sectors_partial(
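
The NOTE in `try_insert_sectors` points at the intended no-op behaviour: merge the incoming sectors into a candidate set and only replace the stored set if the bounds still hold, so a failed insert leaves the map untouched. Combined with the new empty-set guard, the idea looks roughly like the following sketch on plain `BTreeSet`s; `MAX_SECTORS` and the unit error are stand-ins for the pallet's bounded types and `SectorMapError`.

```rust
use std::collections::BTreeSet;

/// Hypothetical bound standing in for the pallet's ConstU32 limits.
const MAX_SECTORS: usize = 8;

/// Insert `new_sectors` only if the merged set stays within the bound;
/// on any failure the original set is left untouched (a true no-op).
fn try_insert_sectors(
    existing: &mut BTreeSet<u64>,
    new_sectors: BTreeSet<u64>,
) -> Result<(), ()> {
    // Mirrors the new guard: inserting an empty set is treated as an error.
    if new_sectors.is_empty() {
        return Err(());
    }

    // Merge into a candidate set first so `existing` stays intact on failure.
    let mut merged = existing.clone();
    merged.extend(new_sectors);
    if merged.len() > MAX_SECTORS {
        return Err(());
    }

    // Bounds hold, so swap the merged set in.
    *existing = merged;
    Ok(())
}
```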