Skip to content

Commit

Permalink
Track last used physical addr
Browse files Browse the repository at this point in the history
Signed-off-by: Graham MacDonald <[email protected]>
  • Loading branch information
gmacd committed Feb 15, 2024
1 parent 1a10e98 commit c4fca39
Show file tree
Hide file tree
Showing 2 changed files with 134 additions and 60 deletions.
2 changes: 2 additions & 0 deletions aarch64/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,8 @@ pub extern "C" fn main9(dtb_va: usize) {
vm::switch(&*ptr::addr_of!(KPGTBL));
}

// From this point we can use the global allocator

print_binary_sections();
print_memory_info();
print_board_info();
Expand Down
192 changes: 132 additions & 60 deletions port/src/bitmapalloc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,20 +15,20 @@ impl<const SIZE_BYTES: usize> Bitmap<SIZE_BYTES> {

/// Is bit `i` within the bitmap set?
pub fn is_set(&self, i: usize) -> bool {
let byteidx = i / 8;
let bitidx = i % 8;
let byte = self.bytes[byteidx];
byte & (1 << bitidx) > 0
let byte_idx = i / 8;
let bit_idx = i % 8;
let byte = self.bytes[byte_idx];
byte & (1 << bit_idx) > 0
}

/// Set bit `i` within the bitmap
pub fn set(&mut self, i: usize, b: bool) {
let byteidx = i / 8;
let bitidx = i % 8;
let byte_idx = i / 8;
let bit_idx = i % 8;
if b {
self.bytes[byteidx] |= 1 << bitidx;
self.bytes[byte_idx] |= 1 << bit_idx;
} else {
self.bytes[byteidx] &= !(1 << bitidx);
self.bytes[byte_idx] &= !(1 << bit_idx);
}
}
}
Expand All @@ -47,8 +47,9 @@ pub enum BitmapPageAllocError {
/// will be marked as allocated.
pub struct BitmapPageAlloc<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize> {
bitmaps: [Bitmap<BITMAP_SIZE_BYTES>; NUM_BITMAPS],
alloc_page_size: usize, // Size of pages represented by single bit
end: PhysAddr, // Upper bound of physical memory
alloc_page_size: usize, // Size of pages represented by single bit
end: PhysAddr, // Upper bound of physical memory
next_pa_to_scan: PhysAddr, // PhysAddr from which to start scanning for next allocation
}

impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
Expand All @@ -60,6 +61,7 @@ impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
bitmaps: [const { Bitmap::<BITMAP_SIZE_BYTES>::new(0xff) }; NUM_BITMAPS],
alloc_page_size,
end,
next_pa_to_scan: PhysAddr::new(0),
}
}

Expand Down Expand Up @@ -117,38 +119,31 @@ impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
let end_range = PhysRange::new(self.end, PhysAddr::new(self.max_bytes() as u64));
self.mark_range(&end_range, true, false)?;

self.next_pa_to_scan = PhysAddr::new(0); // Just set to 0 for simplicity - could be smarter

Ok(())
}

/// Try to allocate the next available page.
/// TODO Add marker to last allocated and use that
pub fn allocate(&mut self) -> Result<PhysAddr, BitmapPageAllocError> {
let mut num_pages_remaining = self.end.addr() as usize / self.alloc_page_size;
for bitmapidx in 0..self.bitmaps.len() {
let bitmap = &mut self.bitmaps[bitmapidx];
for byteidx in 0..bitmap.bytes.len() {
let byte = &mut bitmap.bytes[byteidx];
if *byte != 0xff {
let num_leading_ones = byte.trailing_ones() as usize;
let bit = 1 << num_leading_ones;
*byte |= bit;

let pa = ((bitmapidx * self.bytes_per_bitmap())
+ (byteidx * 8 * self.alloc_page_size)
+ (num_leading_ones * self.alloc_page_size))
as u64;

return Ok(PhysAddr::new(pa));
}

if num_pages_remaining > 8 {
num_pages_remaining -= 8;
} else {
num_pages_remaining = 0;
}
}
let (first_bitmap_idx, first_byte_idx, _) = self.physaddr_as_indices(self.next_pa_to_scan);

let found_indices = self
.indices_from(first_bitmap_idx, first_byte_idx)
.find(|indices| self.byte(indices) != 0xff);

if let Some(indices) = found_indices {
// Mark the page as allocated and return the address
let byte = &mut self.bitmaps[indices.bitmap].bytes[indices.byte];
let num_leading_ones = byte.trailing_ones() as usize;
*byte |= 1 << num_leading_ones;

let pa = self.indices_as_physaddr(indices.bitmap, indices.byte, num_leading_ones);
self.next_pa_to_scan = pa;
Ok(pa)
} else {
Err(BitmapPageAllocError::OutOfSpace)
}
Err(BitmapPageAllocError::OutOfSpace)
}

/// Deallocate the page corresponding to the given PhysAddr.
Expand All @@ -165,6 +160,8 @@ impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
}
bitmap.set(bit_idx, false);

self.next_pa_to_scan = pa; // Next allocation will reuse this

Ok(())
}

Expand All @@ -173,8 +170,8 @@ impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
// We count free because the last bits might be marked partially 'allocated'
// if the end comes in the middle of a byte in the bitmap.
let mut free_bytes: usize = 0;
for byte in self.bytes() {
free_bytes += byte.count_zeros() as usize * self.alloc_page_size;
for indices in self.indices() {
free_bytes += self.byte(&indices).count_zeros() as usize * self.alloc_page_size;
}
let total = self.end.0 as usize;
(total - free_bytes, total)
Expand All @@ -201,6 +198,16 @@ impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
(bitmap_idx, byte_idx, bit_idx)
}

/// Convert a triple of (bitmap index, byte index within the bitmap, bit index
/// within the byte) into the PhysAddr of the page that bit represents.
fn indices_as_physaddr(&self, bitmap_idx: usize, byte_idx: usize, bit_idx: usize) -> PhysAddr {
    // Sum the byte offsets contributed by each level of the hierarchy:
    // whole bitmaps, then whole bytes, then individual bits (pages).
    let bitmap_offset = bitmap_idx * self.bytes_per_bitmap();
    let byte_offset = byte_idx * self.bytes_per_bitmap_byte();
    let bit_offset = bit_idx * self.alloc_page_size;
    PhysAddr::new((bitmap_offset + byte_offset + bit_offset) as u64)
}

fn mark_range(
&mut self,
range: &PhysRange,
Expand All @@ -226,24 +233,71 @@ impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
/// Iterate over each of the bytes in turn. Iterates only over the bytes
/// covering pages up to `end`. If `end` is within one of the bytes, that
/// byte will be returned.
fn bytes(&self) -> impl Iterator<Item = u8> + '_ {
let mut bitmapidx = 0;
let mut byteidx = 0;
let mut currpa = PhysAddr::new(0);
fn indices(&self) -> impl Iterator<Item = ByteIndices> + '_ {
self.indices_from(0, 0)
}

/// Iterate over each of the bytes in turn, starting from a particular bitmap
/// and byte, and looping to iterate across all bytes. Iterates only over the bytes
/// covering pages up to `end`. If `end` is within one of the bytes, that
/// byte will be returned.
fn indices_from(
&self,
start_bitmap_idx: usize,
start_byte_idx: usize,
) -> impl Iterator<Item = ByteIndices> + '_ {
let mut bitmap_idx = start_bitmap_idx;
let mut byte_idx = start_byte_idx;
let mut passed_first = false;
let mut currpa = self.indices_as_physaddr(bitmap_idx, byte_idx, 0);

core::iter::from_fn(move || {
if bitmapidx >= self.bitmaps.len() || currpa >= self.end {
// Catch when we've iterated to the end of the last bitmap and need to
// cycle back to the start
if bitmap_idx >= self.bitmaps.len() || currpa >= self.end {
bitmap_idx = 0;
byte_idx = 0;
currpa = PhysAddr::new(0);
}

// Catch when we've iterated over all the bytes
if passed_first && bitmap_idx == start_bitmap_idx && byte_idx == start_byte_idx {
return None;
}
let byte = self.bitmaps[bitmapidx].bytes[byteidx];
byteidx += 1;
if byteidx >= BITMAP_SIZE_BYTES {
byteidx = 0;
bitmapidx += 1;
passed_first = true;

// Return the byte and prepare for the next
let indices = ByteIndices { bitmap: bitmap_idx, byte: byte_idx };
byte_idx += 1;
if byte_idx >= BITMAP_SIZE_BYTES {
byte_idx = 0;
bitmap_idx += 1;
currpa.0 += self.alloc_page_size as u64;
}
Some(byte)
Some(indices)
})
}

/// Return the bitmap byte at the given indices (one bit per page).
fn byte(&self, indices: &ByteIndices) -> u8 {
self.bitmaps[indices.bitmap].bytes[indices.byte]
}

/// Test helper: collect every bitmap byte, in iteration order, so tests can
/// assert on the allocator's full state at once.
#[cfg(test)]
fn bytes(&self) -> Vec<u8> {
    let mut collected = Vec::new();
    for idx in self.indices() {
        collected.push(self.byte(&idx));
    }
    collected
}

/// Test helper: collect every bitmap byte starting from the given bitmap and
/// byte index, wrapping around, so tests can check the cyclic iteration order.
#[cfg(test)]
fn bytes_from(&self, start_bitmap_idx: usize, start_byte_idx: usize) -> Vec<u8> {
    let mut collected = Vec::new();
    for idx in self.indices_from(start_bitmap_idx, start_byte_idx) {
        collected.push(self.byte(&idx));
    }
    collected
}
}

/// Identifies a single byte within the allocator's set of bitmaps.
struct ByteIndices {
// Index of the bitmap within `bitmaps`.
bitmap: usize,
// Index of the byte within that bitmap's `bytes` array.
byte: usize,
}

/// fmt::Debug is useful in small test cases, but would be too verbose for a
Expand All @@ -253,8 +307,8 @@ impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize> fmt::Debug
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0x")?;
for b in self.bytes() {
write!(f, "{:02x}", b)?;
for b in self.indices() {
write!(f, "{:02x}", self.byte(&b))?;
}
Ok(())
}
Expand Down Expand Up @@ -286,23 +340,28 @@ mod tests {
}
}

#[test]
fn iterate() {
    // 2 bitmaps of 2 bytes each, 4-byte pages: everything starts allocated,
    // so all four bytes read back as 0xff.
    let alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4);
    let all_allocated = vec![0xffu8; 4];
    assert_eq!(alloc.bytes(), all_allocated);
    // Starting the scan from the second bitmap wraps and still visits all bytes.
    assert_eq!(alloc.bytes_from(1, 0), all_allocated);
}

#[test]
fn bitmappagealloc_mark_allocated_and_free() -> Result<(), BitmapPageAllocError> {
// Create a new allocator and mark it all freed
// 2 bitmaps, 2 bytes per bitmap, mapped to pages of 4 bytes
// 32 bits, 128 bytes physical memory
let mut alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4);
alloc.mark_free(&PhysRange::with_end(0, alloc.max_bytes() as u64))?;
println!("BitmapPageAlloc: {:?}", alloc);

// Mark a range as allocated - 10 bits
alloc.mark_allocated(&PhysRange::with_end(4, 44))?;
println!("BitmapPageAlloc: {:?}", alloc);
assert_eq!(alloc.bytes().collect::<Vec<u8>>(), [0xfe, 0x07, 0x00, 0x00]);
assert_eq!(alloc.bytes(), [0xfe, 0x07, 0x00, 0x00]);

// Deallocate a range - first 2 bits
alloc.mark_free(&PhysRange::with_end(0, 8))?;
assert_eq!(alloc.bytes().collect::<Vec<u8>>(), [0xfc, 0x07, 0x00, 0x00]);
assert_eq!(alloc.bytes(), [0xfc, 0x07, 0x00, 0x00]);
Ok(())
}

Expand All @@ -318,8 +377,7 @@ mod tests {
// Mark a range as allocated - 10 bits
alloc.mark_allocated(&PhysRange::with_end(4, 44))?;
assert_eq!(alloc.usage_bytes(), (40, 128));
println!("BitmapPageAlloc: {:?}, {:?}", alloc, alloc.usage_bytes());
assert_eq!(alloc.bytes().collect::<Vec<u8>>(), [0xfe, 0x07, 0x00, 0x00]);
assert_eq!(alloc.bytes(), [0xfe, 0x07, 0x00, 0x00]);

// Now try to allocate the next 3 free pages
assert_eq!(alloc.allocate()?, PhysAddr::new(0));
Expand All @@ -331,19 +389,19 @@ mod tests {
for _ in 0..19 {
alloc.allocate()?;
}
assert_eq!(alloc.bytes().collect::<Vec<u8>>(), [0xff, 0xff, 0xff, 0xff]);
assert_eq!(alloc.bytes(), [0xff, 0xff, 0xff, 0xff]);
assert_eq!(alloc.allocate().unwrap_err(), BitmapPageAllocError::OutOfSpace);

// Now try to deallocate the second page
assert!(alloc.deallocate(PhysAddr::new(4)).is_ok());
assert_eq!(alloc.bytes().collect::<Vec<u8>>(), [0xfd, 0xff, 0xff, 0xff]);
assert_eq!(alloc.bytes(), [0xfd, 0xff, 0xff, 0xff]);

// Ensure double deallocation fails
assert_eq!(
alloc.deallocate(PhysAddr::new(4)).unwrap_err(),
BitmapPageAllocError::NotAllocated
);
assert_eq!(alloc.bytes().collect::<Vec<u8>>(), [0xfd, 0xff, 0xff, 0xff]);
assert_eq!(alloc.bytes(), [0xfd, 0xff, 0xff, 0xff]);

// Allocate once more, expecting the physical address we just deallocated
assert_eq!(alloc.allocate()?, PhysAddr::new(4));
Expand All @@ -355,6 +413,7 @@ mod tests {
fn physaddr_as_indices() {
let alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096);
let bytes_per_bitmap = alloc.bytes_per_bitmap() as u64;

assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(0)), (0, 0, 0));
assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(4096)), (0, 0, 1));
assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(8192)), (0, 0, 2));
Expand All @@ -366,4 +425,17 @@ mod tests {
(1, 1, 1)
);
}

#[test]
fn indices_as_physaddr() {
    let alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096);
    let bytes_per_bitmap = alloc.bytes_per_bitmap() as u64;

    // Each case: (bitmap_idx, byte_idx, bit_idx) and the expected address.
    let cases: [(usize, usize, usize, u64); 6] = [
        (0, 0, 0, 0),
        (0, 0, 1, 4096),
        (0, 1, 0, 4096 * 8),
        (0, 1, 1, 4096 * 9),
        (1, 0, 0, bytes_per_bitmap),
        (1, 1, 1, bytes_per_bitmap + 4096 * 9),
    ];
    for (bitmap_idx, byte_idx, bit_idx, expected) in cases {
        assert_eq!(
            alloc.indices_as_physaddr(bitmap_idx, byte_idx, bit_idx),
            PhysAddr::new(expected)
        );
    }
}
}

0 comments on commit c4fca39

Please sign in to comment.