accounts-db: Benchmark cache evictions
The existing `concurrent_{read,scan}_write` benchmarks are not
sufficient for benchmarking eviction or for evaluating which eviction
policy performs best: they never fill up the cache, so eviction never
happens.

Add a new benchmark that measures concurrent reads and writes on an
already full cache, so that every write triggers an eviction.
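
At its core, the new benchmark uses Criterion's `iter_batched` with
`BatchSize::PerIteration`: writer threads are spawned in the unmeasured setup
closure, and the measured routine simply joins them. A minimal, self-contained
sketch of that pattern, with a placeholder workload standing in for the real
cache (see the full listing below):

use {
    criterion::{criterion_group, criterion_main, BatchSize, Criterion},
    std::thread::{self, JoinHandle},
};

// Placeholder workload; the real benchmark stores accounts into a full
// ReadOnlyAccountsCache, so that every store triggers an eviction.
fn write_workload() {
    std::hint::black_box((0..1_000u64).sum::<u64>());
}

fn bench_pattern(c: &mut Criterion) {
    c.bench_function("spawn_writers_then_join", |b| {
        b.iter_batched(
            // Setup, excluded from the measurement: spawn the writers.
            // The real benchmark also hands them a `Barrier` so all
            // writers start their stores at roughly the same time.
            || {
                (0..4)
                    .map(|_| thread::spawn(write_workload))
                    .collect::<Vec<JoinHandle<()>>>()
            },
            // Measured routine: wait for the concurrent writes to finish.
            |writers| {
                for writer in writers {
                    writer.join().unwrap();
                }
            },
            // Fresh threads on every iteration.
            BatchSize::PerIteration,
        )
    });
}

criterion_group!(benches, bench_pattern);
criterion_main!(benches);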
vadorovsky committed Dec 11, 2024
1 parent 0c26485 commit 42ca375
Showing 7 changed files with 297 additions and 9 deletions.
52 changes: 51 additions & 1 deletion Cargo.lock

(Diff of the generated lockfile not shown.)

1 change: 1 addition & 0 deletions Cargo.toml
@@ -367,6 +367,7 @@ merlin = "3"
min-max-heap = "1.3.0"
mockall = "0.11.4"
modular-bitfield = "0.11.2"
ndarray = "0.16.1"
nix = "0.29.0"
num-bigint = "0.4.6"
num-derive = "0.4"
5 changes: 5 additions & 0 deletions accounts-db/Cargo.toml
@@ -67,6 +67,7 @@ assert_matches = { workspace = true }
criterion = { workspace = true }
libsecp256k1 = { workspace = true }
memoffset = { workspace = true }
ndarray = { workspace = true }
rand_chacha = { workspace = true }
serde_bytes = { workspace = true }
# See order-crates-for-publishing.py for using this unusual `path = "."`
@@ -103,6 +104,10 @@ harness = false
name = "bench_hashing"
harness = false

[[bench]]
name = "bench_read_only_accounts_cache"
harness = false

[[bench]]
name = "bench_serde"
harness = false
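
Criterion benches define their own `main` via `criterion_main!`, which is why
every `[[bench]]` target sets `harness = false`: it turns off the default
libtest harness that would otherwise provide `main` and reject Criterion's
command-line flags. Assuming the crate keeps its usual `solana-accounts-db`
package name, the new bench should be runnable with
`cargo bench -p solana-accounts-db --bench bench_read_only_accounts_cache`.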
227 changes: 227 additions & 0 deletions accounts-db/benches/bench_read_only_accounts_cache.rs
@@ -0,0 +1,227 @@
#![feature(test)]

extern crate test;

use {
criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion},
ndarray::{Array2, ArrayView},
rand::{rngs::SmallRng, seq::SliceRandom, SeedableRng},
solana_accounts_db::{
accounts_db::AccountsDb, read_only_accounts_cache::ReadOnlyAccountsCache,
},
solana_sdk::{
account::{Account, AccountSharedData},
pubkey::{self, Pubkey},
},
std::{
iter,
sync::{
atomic::{AtomicBool, Ordering},
Arc, Barrier,
},
thread::{Builder, JoinHandle},
},
};

/// Sizes of accounts to bench.
const ACCOUNTS_SIZES: &[usize] = &[0, 512, 1024];
/// Numbers of reader and writer threads to bench.
const NUM_READERS_WRITERS: &[usize] = &[
8,
16,
// These parameters are likely to freeze your computer if it has fewer
// than 32 cores.
// 32, 64, 128, 256, 512, 1024,
];

/// Benchmarks the read-only cache eviction mechanism. It does so by performing
/// multithreaded reads and writes on a full cache. Each write triggers
/// eviction. Background reads add more contention.
fn bench_read_only_accounts_cache_eviction(c: &mut Criterion) {
/// Number of 1 MiB accounts needed to initially fill the cache.
const NUM_ACCOUNTS_INIT: usize = 410;
/// Number of accounts used in the benchmarked writes (per thread).
const NUM_ACCOUNTS_PER_THREAD: usize = 512;

let mut group = c.benchmark_group("cache_eviction");

for account_size in ACCOUNTS_SIZES {
for num_readers_writers in NUM_READERS_WRITERS {
// Test on even numbers of threads.
assert!(*num_readers_writers % 2 == 0);

let cache = Arc::new(ReadOnlyAccountsCache::new(
AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO,
AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
));

// Prepare accounts for the cache fillup.
let pubkeys: Vec<_> = iter::repeat_with(pubkey::new_rand)
.take(NUM_ACCOUNTS_INIT)
.collect();
let accounts_data = iter::repeat(
Account {
lamports: 1,
// 1 MiB of data, so that `NUM_ACCOUNTS_INIT` such accounts fill the
// cache up to its high watermark.
data: vec![1; 1024 * 1024],
..Default::default()
}
.into(),
)
.take(NUM_ACCOUNTS_INIT);
let storable_accounts = pubkeys.iter().zip(accounts_data);

// Fill up the cache.
let slot = 0;
for (pubkey, account) in storable_accounts {
cache.store(*pubkey, slot, account);
}

// Prepare accounts for the N writer threads. We want each of them to
// perform both new writes and updates; in general, half of the
// operations should be new writes and the other half updates.
//
// To achieve that, generate a 2D array of pubkeys with N columns and
// `NUM_ACCOUNTS_PER_THREAD` rows, in the following steps:
//
// * Generate `NUM_ACCOUNTS_PER_THREAD / 2` rows of unique pubkeys.
// * Add `NUM_ACCOUNTS_PER_THREAD / 2` more rows with the same pubkeys
//   as the upper half, but shuffled across columns. Example:
//   * Upper rows:
//     [0, 1, 2, 3]
//     [4, 5, 6, 7]
//     [...]
//   * Bottom rows:
//     [2, 1, 3, 0]
//     [5, 4, 7, 6]
//     [...]
// * That already gives us a set of pubkeys where half are new and half
//   trigger an update. But if we used the columns as they are, every
//   thread would first perform all its new writes and then all its
//   updates, in the same order. To add some entropy, shuffle each
//   column as well.
let mut rng = SmallRng::seed_from_u64(100);
let mut new_pubkeys: Array2<Pubkey> = Array2::from_shape_vec(
(NUM_ACCOUNTS_PER_THREAD / 2, *num_readers_writers),
// Generate a distinct pubkey per element; `vec![new_rand(); n]`
// would clone a single pubkey `n` times.
iter::repeat_with(pubkey::new_rand)
.take(num_readers_writers.saturating_mul(NUM_ACCOUNTS_PER_THREAD / 2))
.collect(),
)
.unwrap();
let new_rows: Vec<Vec<Pubkey>> = new_pubkeys
.rows()
.into_iter()
.map(|row| {
let mut shuffled_row = row.to_vec();
shuffled_row.shuffle(&mut rng);
shuffled_row
})
.collect();
for new_row in new_rows {
new_pubkeys
.push_row(ArrayView::from(new_row.as_slice()))
.unwrap();
}
let new_accounts: Vec<Vec<(Pubkey, AccountSharedData)>> = new_pubkeys
.columns()
.into_iter()
.map(|column| {
// Both `ArrayBase::as_slice` and `ArrayBase::as_mut_slice`
// return `None` in this case, so let's just collect the elements.
let mut pubkeys_for_thread = column
.into_iter()
.map(|pubkey| pubkey.to_owned())
.zip(
iter::repeat(
Account {
lamports: 1,
data: vec![1; *account_size],
..Default::default()
}
.into(),
)
.take(NUM_ACCOUNTS_PER_THREAD),
)
.collect::<Vec<_>>();
pubkeys_for_thread.shuffle(&mut rng);
pubkeys_for_thread
})
.collect();

// Spawn the reader threads in the background.
let stop_reader = Arc::new(AtomicBool::new(false));
// Collect eagerly; a bare `map` is lazy and would not spawn any
// threads until iterated.
let reader_handles: Vec<_> = (0..*num_readers_writers).map(|i| {
let cache = cache.clone();
let pubkeys = pubkeys.clone();
let stop_reader = stop_reader.clone();
Builder::new()
.name(format!("reader{i:02}"))
.spawn({
move || {
// Continuously read random accounts.
let mut rng = SmallRng::seed_from_u64(i as u64);
while !stop_reader.load(Ordering::Relaxed) {
let pubkey = pubkeys.choose(&mut rng).unwrap();
test::black_box(cache.load(*pubkey, slot));
}
}
})
.unwrap()
}).collect();

let slot = 1;
let barrier = Arc::new(Barrier::new(*num_readers_writers));
// Benchmark reads and writes on a full cache, triggering eviction on
// each write.
group.bench_function(
BenchmarkId::new(
"read_only_accounts_cache_eviction",
format!("{account_size}_{num_readers_writers}"),
),
|b| {
b.iter_batched(
// Set up the writer threads (excluded from the measurement).
|| {
(0..*num_readers_writers)
.map(|i| {
let cache = cache.clone();
let new_accounts = new_accounts[i].clone();
Builder::new()
.name(format!("writer{i:02}"))
.spawn({
let barrier = Arc::clone(&barrier);
move || {
barrier.wait();

// Write accounts.
for (pubkey, account) in new_accounts {
cache.store(pubkey, slot, account);
}
}
})
.unwrap()
})
.collect()
},
// Measure the writes by joining the writer threads.
|writer_handles: Vec<JoinHandle<()>>| {
for writer_handle in writer_handles {
writer_handle.join().unwrap();
}
},
BatchSize::PerIteration,
)
},
);

stop_reader.store(true, Ordering::Relaxed);
for reader_handle in reader_handles {
reader_handle.join().unwrap();
}
}
}
}

criterion_group!(benches, bench_read_only_accounts_cache_eviction);
criterion_main!(benches);
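
Because the group is named `cache_eviction` and each case is registered with
`BenchmarkId::new("read_only_accounts_cache_eviction", format!("{account_size}_{num_readers_writers}"))`,
Criterion should report one result per parameter combination, under IDs like
`cache_eviction/read_only_accounts_cache_eviction/512_8`.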
6 changes: 3 additions & 3 deletions accounts-db/src/accounts_db.rs
@@ -1889,12 +1889,12 @@ impl AccountsDb {
pub const DEFAULT_ACCOUNTS_HASH_CACHE_DIR: &'static str = "accounts_hash_cache";

// read only cache does not update lru on read of an entry unless it has been at least this many ms since the last lru update
const READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE: u32 = 100;
pub const READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE: u32 = 100;

// The default high and low watermark sizes for the accounts read cache.
// If the cache size exceeds MAX_SIZE_HI, it'll evict entries until the size is <= MAX_SIZE_LO.
const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024;
const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024;
pub const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024;
pub const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024;

pub fn default_for_tests() -> Self {
Self::new_single_for_tests()
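
These constants are made `pub` so the new bench can construct a
`ReadOnlyAccountsCache` with the production parameters
(`READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE` caps how often a read refreshes an
entry's LRU position). The watermark comment describes the eviction trigger:
once the cache's data size exceeds the high watermark, entries are evicted
until the size is back at or below the low one. A minimal sketch of that
bookkeeping, assuming only that the cache tracks its total data size (the
names are illustrative, not the crate's API):

struct Watermarks {
    /// Corresponds to DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI.
    max_hi: usize,
    /// Corresponds to DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO.
    max_lo: usize,
}

impl Watermarks {
    /// How many bytes to evict after a store, if any. Evicting all the
    /// way down to the low watermark, rather than just below the high
    /// one, batches the eviction work instead of paying it on every
    /// subsequent store.
    fn bytes_to_evict(&self, cache_data_size: usize) -> usize {
        if cache_data_size > self.max_hi {
            cache_data_size - self.max_lo
        } else {
            0
        }
    }
}

fn main() {
    let wm = Watermarks {
        max_hi: 410 * 1024 * 1024,
        max_lo: 400 * 1024 * 1024,
    };
    // Below the high watermark: nothing to evict.
    assert_eq!(wm.bytes_to_evict(300 * 1024 * 1024), 0);
    // One byte over it: evict down to the low watermark.
    assert_eq!(wm.bytes_to_evict(410 * 1024 * 1024 + 1), 10 * 1024 * 1024 + 1);
}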
2 changes: 1 addition & 1 deletion accounts-db/src/lib.rs
@@ -32,7 +32,7 @@ mod file_io;
pub mod hardened_unpack;
pub mod partitioned_rewards;
pub mod pubkey_bins;
mod read_only_accounts_cache;
pub mod read_only_accounts_cache;
mod rolling_bit_field;
pub mod secondary_index;
pub mod shared_buffer_reader;