perf: avoid mem alloc
Devdutt Shenoi committed Oct 13, 2024
1 parent 6e47b99 commit 66e8175
Showing 1 changed file with 11 additions and 11 deletions.
22 changes: 11 additions & 11 deletions storage/src/lib.rs
@@ -198,8 +198,8 @@ fn id(path: &Path) -> Result<u64, Error> {
 
 /// Gets list of file ids in the disk. Id of file backup@10 is 10.
 /// Storing ids instead of full paths enables efficient indexing
-fn get_file_ids(path: &Path) -> Result<VecDeque<u64>, Error> {
-    let mut file_ids = Vec::new();
+fn get_file_ids(path: &Path, max_file_count: usize) -> Result<VecDeque<u64>, Error> {
+    let mut file_ids = Vec::with_capacity(max_file_count);
     let files = fs::read_dir(path)?;
     for file in files {
         let path = file?.path();
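The hunk above threads the already-known max_file_count limit into get_file_ids so the id buffer is allocated once up front rather than growing as entries are pushed. Below is a minimal, self-contained sketch of that pattern; it is illustrative only, and collect_ids, its sorting step, and main are hypothetical stand-ins, not code from this commit:

use std::collections::VecDeque;

// Illustrative stand-in for the diff's change: `Vec::new()` starts at capacity 0
// and may reallocate several times as ids are pushed, while reserving
// `max_file_count` slots up front performs at most one allocation as long as the
// hint is large enough.
fn collect_ids(ids: impl Iterator<Item = u64>, max_file_count: usize) -> VecDeque<u64> {
    let mut file_ids = Vec::with_capacity(max_file_count); // single up-front allocation
    for id in ids {
        file_ids.push(id); // no reallocation while file_ids.len() stays within the hint
    }
    file_ids.sort_unstable(); // hypothetical ordering step; the real function body is elided above
    VecDeque::from(file_ids) // in recent Rust versions this conversion reuses the Vec's buffer
}

fn main() {
    let ids = collect_ids((0..10).rev(), 10);
    assert_eq!(ids, VecDeque::from(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
}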
@@ -326,7 +326,7 @@ struct Persistence {
 impl Persistence {
     fn new<P: Into<PathBuf>>(path: P, max_file_count: usize) -> Result<Self, Error> {
         let path = path.into();
-        let backlog_files = get_file_ids(&path)?;
+        let backlog_files = get_file_ids(&path, max_file_count)?;
         info!("List of file ids loaded from disk: {backlog_files:?}");
 
         let bytes_occupied = backlog_files.iter().fold(0, |acc, id| {
@@ -464,7 +464,7 @@ mod test {
         assert_eq!(storage.writer().len(), 1036);
 
         // other messages on disk
-        let files = get_file_ids(&backup.path()).unwrap();
+        let files = get_file_ids(&backup.path(), 10).unwrap();
         assert_eq!(files, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
     }

@@ -477,14 +477,14 @@
         // 11 files created. 10 on disk
         write_n_publishes(&mut storage, 110);
 
-        let files = get_file_ids(&backup.path()).unwrap();
+        let files = get_file_ids(&backup.path(), 10).unwrap();
         assert_eq!(files, vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
 
         // 11 files created. 10 on disk
         write_n_publishes(&mut storage, 10);
 
         assert_eq!(storage.writer().len(), 0);
-        let files = get_file_ids(&backup.path()).unwrap();
+        let files = get_file_ids(&backup.path(), 10).unwrap();
         assert_eq!(files, vec![2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
     }

@@ -537,7 +537,7 @@ mod test {
         assert_eq!(storage.persistence.as_ref().unwrap().current_read_file_id, None);
 
         // Ensure unread files are all present before read
-        let files = get_file_ids(&backup.path()).unwrap();
+        let files = get_file_ids(&backup.path(), 10).unwrap();
         assert_eq!(files, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
 
         // Successfully read 10 files with files still in storage after 10 reads
@@ -546,7 +546,7 @@
             let file_id = storage.persistence.as_ref().unwrap().current_read_file_id.unwrap();
             assert_eq!(file_id, i);
             // Ensure partially read file is still present in backup dir
-            let files = get_file_ids(&backup.path()).unwrap();
+            let files = get_file_ids(&backup.path(), 10).unwrap();
             assert!(files.contains(&i));
         }

@@ -555,7 +555,7 @@
         assert_eq!(storage.persistence.as_ref().unwrap().current_read_file_id, None);
 
         // Ensure read files are all present before read
-        let files = get_file_ids(&backup.path()).unwrap();
+        let files = get_file_ids(&backup.path(), 10).unwrap();
         assert_eq!(files, vec![]);
     }

@@ -576,14 +576,14 @@
         assert_eq!(file_id, 0);
 
         // Ensure all persistance files still exist
-        let files = get_file_ids(&backup.path()).unwrap();
+        let files = get_file_ids(&backup.path(), 10).unwrap();
         assert_eq!(files, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
 
         // Write 10 more files onto disk, 10 publishes per file
         write_n_publishes(&mut storage, 100);
 
         // Ensure none of the earlier files exist on disk
-        let files = get_file_ids(&backup.path()).unwrap();
+        let files = get_file_ids(&backup.path(), 10).unwrap();
         assert_eq!(files, vec![10, 11, 12, 13, 14, 15, 16, 17, 18, 19]);
     }
 }
