diff --git a/Cargo.lock b/Cargo.lock
index d29e987acdb3dd..606e2ea334650d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3683,6 +3683,16 @@ version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"
 
+[[package]]
+name = "matrixmultiply"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a"
+dependencies = [
+ "autocfg",
+ "rawpointer",
+]
+
 [[package]]
 name = "memchr"
 version = "2.6.3"
@@ -3848,6 +3858,21 @@ dependencies = [
  "tempfile",
 ]
 
+[[package]]
+name = "ndarray"
+version = "0.16.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841"
+dependencies = [
+ "matrixmultiply",
+ "num-complex 0.4.6",
+ "num-integer",
+ "num-traits",
+ "portable-atomic",
+ "portable-atomic-util",
+ "rawpointer",
+]
+
 [[package]]
 name = "net2"
 version = "0.2.37"
@@ -3908,7 +3933,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b8536030f9fea7127f841b45bb6243b27255787fb4eb83958aa1ef9d2fdc0c36"
 dependencies = [
  "num-bigint 0.2.6",
- "num-complex",
+ "num-complex 0.2.4",
  "num-integer",
  "num-iter",
  "num-rational",
@@ -3946,6 +3971,15 @@ dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "num-complex"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "num-derive"
 version = "0.4.2"
@@ -4441,6 +4475,15 @@ version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265"
 
+[[package]]
+name = "portable-atomic-util"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
+dependencies = [
+ "portable-atomic",
+]
+
 [[package]]
 name = "ppv-lite86"
 version = "0.2.15"
@@ -4887,6 +4930,12 @@ dependencies = [
  "bitflags 2.6.0",
 ]
 
+[[package]]
+name = "rawpointer"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
+
 [[package]]
 name = "rayon"
 version = "1.10.0"
@@ -5954,6 +6003,7 @@ dependencies = [
  "memmap2",
  "memoffset 0.9.1",
  "modular-bitfield",
+ "ndarray",
  "num_cpus",
  "num_enum",
  "qualifier_attr",
diff --git a/Cargo.toml b/Cargo.toml
index dd983f3398aac2..fb27fb6be90406 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -364,6 +364,7 @@ merlin = "3"
 min-max-heap = "1.3.0"
 mockall = "0.11.4"
 modular-bitfield = "0.11.2"
+ndarray = "0.16.1"
 nix = "0.29.0"
 num-bigint = "0.4.6"
 num-derive = "0.4"
diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml
index 82a983ede37a20..e5753746d1b11e 100644
--- a/accounts-db/Cargo.toml
+++ b/accounts-db/Cargo.toml
@@ -67,6 +67,7 @@ assert_matches = { workspace = true }
 criterion = { workspace = true }
 libsecp256k1 = { workspace = true }
 memoffset = { workspace = true }
+ndarray = { workspace = true }
 rand_chacha = { workspace = true }
 serde_bytes = { workspace = true }
 # See order-crates-for-publishing.py for using this unusual `path = "."`
@@ -103,6 +104,10 @@ harness = false
 name = "bench_hashing"
 harness = false
 
+[[bench]]
+name = "bench_read_only_accounts_cache"
+harness = false
+
 [[bench]]
 name = "bench_serde"
 harness = false
diff --git a/accounts-db/benches/bench_read_only_accounts_cache.rs b/accounts-db/benches/bench_read_only_accounts_cache.rs
new file mode 100644
index 00000000000000..b54f0102e5706c
--- /dev/null
+++ b/accounts-db/benches/bench_read_only_accounts_cache.rs
@@ -0,0 +1,208 @@
+#![feature(test)]
+
+extern crate test;
+
+use {
+    criterion::{criterion_group, criterion_main, BenchmarkId, Criterion},
+    ndarray::{Array2, ArrayView},
+    rand::{rngs::SmallRng, seq::SliceRandom, SeedableRng},
+    solana_accounts_db::{
+        accounts_db::AccountsDb, read_only_accounts_cache::ReadOnlyAccountsCache,
+    },
+    solana_sdk::{
+        account::{Account, ReadableAccount},
+        pubkey::Pubkey,
+    },
+    std::{
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc,
+        },
+        thread::Builder,
+    },
+};
+
+const NUM_READERS_WRITERS: &[usize] = &[
+    8,
+    16,
+    // These parameters are likely to freeze your computer if it has fewer
+    // than 32 cores.
+    // 32, 64, 128, 256, 512, 1024,
+];
+
+/// Benchmarks the read-only cache eviction mechanism. It does so by performing
+/// multithreaded reads and writes on a full cache. Each write triggers
+/// eviction. Background reads add more contention.
+fn bench_read_only_accounts_cache_eviction(c: &mut Criterion) {
+    /// Number of 1 MiB accounts needed to initially fill the cache.
+    const NUM_ACCOUNTS_INIT: usize = 410;
+    /// Number of accounts used in the benchmarked writes (per thread).
+    const NUM_ACCOUNTS_PER_THREAD: usize = 512;
+
+    let mut group = c.benchmark_group("cache_eviction");
+
+    for num_readers_writers in NUM_READERS_WRITERS {
+        // Test on even numbers of threads.
+        assert!(*num_readers_writers % 2 == 0);
+
+        let cache = Arc::new(ReadOnlyAccountsCache::new(
+            AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO,
+            AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
+            AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
+        ));
+
+        // Prepare accounts for the cache fill-up.
+        let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
+            .take(NUM_ACCOUNTS_INIT)
+            .collect();
+        let accounts_data = std::iter::repeat(
+            Account {
+                lamports: 1,
+                // 1 MiB
+                data: vec![1; 1024 * 1024],
+                ..Default::default()
+            }
+            .to_account_shared_data(),
+        )
+        .take(NUM_ACCOUNTS_INIT);
+        let storable_accounts = pubkeys.iter().zip(accounts_data);
+
+        // Fill up the cache.
+        let slot = 0;
+        for (pubkey, account) in storable_accounts {
+            cache.store(*pubkey, slot, account);
+        }
+
+        // Prepare accounts for the N writer threads. We want each of them to
+        // perform both new writes and updates: half of the operations should
+        // be new writes, the other half updates.
+        //
+        // To achieve that, generate a 2D array of pubkeys with N columns and
+        // `NUM_ACCOUNTS_PER_THREAD` rows. Take the following steps:
+        //
+        // * Generate `NUM_ACCOUNTS_PER_THREAD / 2` rows with unique pubkeys.
+        // * Add `NUM_ACCOUNTS_PER_THREAD / 2` rows with the same pubkeys as
+        //   the upper half, but shuffled across columns. Example:
+        //   * Upper rows:
+        //     [0, 1, 2, 3]
+        //     [4, 5, 6, 7]
+        //     [...]
+        //   * Bottom rows:
+        //     [2, 1, 3, 0]
+        //     [5, 4, 7, 6]
+        //     [...]
+        // * That already gives us a set of pubkeys where half are new and
+        //   half trigger an update. But if we used the columns as they are
+        //   right now, each thread would first perform all of its new writes
+        //   and only then all of its updates, in the same order. To add some
+        //   entropy, shuffle the elements within each column as well.
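+        //
+        // The resulting matrix has `NUM_ACCOUNTS_PER_THREAD` rows and N
+        // columns; column `i` becomes the write sequence of writer thread
+        // `i` in the benchmark loop below.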
+        let mut rng = SmallRng::seed_from_u64(100);
+        let mut new_pubkeys: Array2<Pubkey> = Array2::from_shape_vec(
+            (NUM_ACCOUNTS_PER_THREAD / 2, *num_readers_writers),
+            std::iter::repeat_with(solana_sdk::pubkey::new_rand)
+                .take(*num_readers_writers * (NUM_ACCOUNTS_PER_THREAD / 2))
+                .collect(),
+        )
+        .unwrap();
+        let new_rows: Vec<Vec<Pubkey>> = new_pubkeys
+            .rows()
+            .into_iter()
+            .map(|row| {
+                let mut shuffled_row = row.to_vec();
+                shuffled_row.shuffle(&mut rng);
+                shuffled_row
+            })
+            .collect();
+        for new_row in new_rows {
+            new_pubkeys
+                .push_row(ArrayView::from(new_row.as_slice()))
+                .unwrap();
+        }
+        let new_pubkeys: Vec<Vec<Pubkey>> = new_pubkeys
+            .columns()
+            .into_iter()
+            .map(|column| {
+                // Both `ArrayBase::as_slice` and `ArrayBase::as_mut_slice`
+                // return `None` in this case, so let's just collect the
+                // elements.
+                let mut pubkeys_for_thread = column
+                    .into_iter()
+                    .map(|pubkey| pubkey.to_owned())
+                    .collect::<Vec<_>>();
+                pubkeys_for_thread.shuffle(&mut rng);
+                pubkeys_for_thread
+            })
+            .collect();
+
+        // Spawn the reader threads in the background.
+        let stop_reader = Arc::new(AtomicBool::new(false));
+        let reader_handles: Vec<_> = (0..*num_readers_writers).map(|i| {
+            let cache = cache.clone();
+            let pubkeys = pubkeys.clone();
+            let stop_reader = stop_reader.clone();
+            Builder::new()
+                .name(format!("reader{i:02}"))
+                .spawn({
+                    move || {
+                        // Continuously read random accounts.
+                        let mut rng = SmallRng::seed_from_u64(i as u64);
+                        while !stop_reader.load(Ordering::Relaxed) {
+                            let pubkey = pubkeys.choose(&mut rng).unwrap();
+                            test::black_box(cache.load(*pubkey, slot));
+                        }
+                    }
+                })
+                .unwrap()
+        })
+        // Collect eagerly so the reader threads start now, not when the
+        // handles are first consumed by the join loop at the end.
+        .collect();
+
+        // Benchmark reads and writes on a full cache, triggering eviction on
+        // each write.
+        let slot = 1;
+        group.sample_size(10);
+        group.bench_function(
+            BenchmarkId::new("read_only_accounts_cache_eviction", num_readers_writers),
+            |b| {
+                b.iter(|| {
+                    // Perform the writes.
+                    let writer_handles: Vec<_> = (0..*num_readers_writers).map(|i| {
+                        let cache = cache.clone();
+                        let new_pubkeys = new_pubkeys[i].clone();
+
+                        Builder::new()
+                            .name(format!("writer{i:02}"))
+                            .spawn({
+                                move || {
+                                    for pubkey in new_pubkeys {
+                                        cache.store(
+                                            pubkey,
+                                            slot,
+                                            Account {
+                                                lamports: 1,
+                                                // 1 MiB
+                                                data: vec![1; 1024 * 1024],
+                                                ..Default::default()
+                                            }
+                                            .to_account_shared_data(),
+                                        );
+                                    }
+                                }
+                            })
+                            .unwrap()
+                    })
+                    // Spawn all writers before joining any of them so the
+                    // writes run concurrently.
+                    .collect();
+
+                    for writer_handle in writer_handles {
+                        writer_handle.join().unwrap();
+                    }
+                })
+            },
+        );
+
+        stop_reader.store(true, Ordering::Relaxed);
+        for reader_handle in reader_handles {
+            reader_handle.join().unwrap();
+        }
+    }
+}
+
+criterion_group!(benches, bench_read_only_accounts_cache_eviction);
+criterion_main!(benches);
diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 26923d5e05a224..77872dc9fb8165 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -1889,12 +1889,12 @@ impl AccountsDb {
     pub const DEFAULT_ACCOUNTS_HASH_CACHE_DIR: &'static str = "accounts_hash_cache";
 
     // read only cache does not update lru on read of an entry unless it has been at least this many ms since the last lru update
-    const READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE: u32 = 100;
+    pub const READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE: u32 = 100;
 
     // The default high and low watermark sizes for the accounts read cache.
     // If the cache size exceeds MAX_SIZE_HI, it'll evict entries until the size is <= MAX_SIZE_LO.
-    const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024;
-    const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024;
+    pub const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024;
+    pub const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024;
 
     pub fn default_for_tests() -> Self {
         Self::new_single_for_tests()
diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs
index 8e7b4faf926b75..4fab1b17fa2c18 100644
--- a/accounts-db/src/lib.rs
+++ b/accounts-db/src/lib.rs
@@ -32,7 +32,7 @@ mod file_io;
 pub mod hardened_unpack;
 pub mod partitioned_rewards;
 pub mod pubkey_bins;
-mod read_only_accounts_cache;
+pub mod read_only_accounts_cache;
 mod rolling_bit_field;
 pub mod secondary_index;
 pub mod shared_buffer_reader;
diff --git a/accounts-db/src/read_only_accounts_cache.rs b/accounts-db/src/read_only_accounts_cache.rs
index 2431761bc5f535..2e2ba9cb11cefb 100644
--- a/accounts-db/src/read_only_accounts_cache.rs
+++ b/accounts-db/src/read_only_accounts_cache.rs
@@ -66,7 +66,7 @@ struct AtomicReadOnlyCacheStats {
 }
 
 #[derive(Debug)]
-pub(crate) struct ReadOnlyAccountsCache {
+pub struct ReadOnlyAccountsCache {
     cache: Arc<DashMap<Pubkey, ReadOnlyAccountCacheEntry>>,
     /// When an item is first entered into the cache, it is added to the end of
     /// the queue. Also each time an entry is looked up from the cache it is
@@ -93,7 +93,7 @@ pub(crate) struct ReadOnlyAccountsCache {
 }
 
 impl ReadOnlyAccountsCache {
-    pub(crate) fn new(
+    pub fn new(
         max_data_size_lo: usize,
         max_data_size_hi: usize,
         ms_to_skip_lru_update: u32,
@@ -137,7 +137,7 @@ impl ReadOnlyAccountsCache {
         }
     }
 
-    pub(crate) fn load(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> {
+    pub fn load(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> {
         let (account, load_us) = measure_us!({
             let mut found = None;
             if let Some(entry) = self.cache.get(&pubkey) {
@@ -175,7 +175,7 @@ impl ReadOnlyAccountsCache {
         CACHE_ENTRY_SIZE + account.data().len()
     }
 
-    pub(crate) fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) {
+    pub fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) {
         let measure_store = Measure::start("");
         self.highest_slot_stored.fetch_max(slot, Ordering::Release);
         let account_size = Self::account_size(&account);
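
With `ReadOnlyAccountsCache` and the related `AccountsDb` constants now public, code outside accounts-db (such as the benchmark above) can construct and drive the cache directly. Below is a minimal usage sketch based only on the signatures visible in this patch; the dependency wiring mirrors the benchmark's, and the 1 KiB account size and slot value are illustrative, not taken from the change itself.

use {
    solana_accounts_db::{
        accounts_db::AccountsDb, read_only_accounts_cache::ReadOnlyAccountsCache,
    },
    solana_sdk::account::{Account, ReadableAccount},
};

fn main() {
    // Build a cache with the same watermarks and LRU-update threshold that
    // AccountsDb uses by default (the constants made `pub` by this change).
    let cache = ReadOnlyAccountsCache::new(
        AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO,
        AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
        AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
    );

    // Store one small account and read it back from the same slot.
    let pubkey = solana_sdk::pubkey::new_rand();
    let slot = 0;
    let account = Account {
        lamports: 1,
        data: vec![1; 1024],
        ..Default::default()
    }
    .to_account_shared_data();
    cache.store(pubkey, slot, account);

    // `load` returns `Option<AccountSharedData>`.
    let _cached = cache.load(pubkey, slot);
}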