diff --git a/aarch64/src/kalloc.rs b/aarch64/src/kalloc.rs
index 973ae3b..78e8f1f 100644
--- a/aarch64/src/kalloc.rs
+++ b/aarch64/src/kalloc.rs
@@ -1,57 +1,73 @@
+/// This module acts as an interface between the portable allocator and the
+/// arch-specific use of it.
+///
+/// The page allocator is constructed and finalised in a number of phases:
+/// 1. `init_page_allocator` to create a fixed size allocator assuming everything
+///    is in use except a small number of statically defined pages available for
+///    setting up the initial page tables.
+/// 2. `free_unused_ranges` to mark available ranges as the inverse of the
+///    physical memory map within the bounds of the available memory.
+use crate::kmem;
+use crate::kmem::physaddr_as_ptr_mut;
 use crate::vm::Page4K;
 use core::ptr;
+use port::bitmapalloc::BitmapPageAlloc;
+use port::bitmapalloc::BitmapPageAllocError;
+use port::mem::PhysRange;
 use port::{
     mcslock::{Lock, LockNode},
     mem::PAGE_SIZE_4K,
 };
 
-static FREE_LIST: Lock<FreeList> = Lock::new("kmem", FreeList { next: None });
+/// Set up bitmap page allocator assuming everything is allocated.
+static PAGE_ALLOC: Lock<BitmapPageAlloc<16, PAGE_SIZE_4K>> = Lock::new(
+    "page_alloc",
+    const { BitmapPageAlloc::<16, PAGE_SIZE_4K>::new_all_allocated(PAGE_SIZE_4K) },
+);
 
-#[repr(align(4096))]
-struct FreeList {
-    next: Option<ptr::NonNull<FreeList>>,
-}
-unsafe impl Send for FreeList {}
+/// The bitmap allocator has all pages marked as allocated initially. We'll
+/// add some pages (mark free) to allow us to set up the page tables and build
+/// a memory map. Once the memory map has been built, we can mark all the unused
+/// space as available. This allows us to use only one page allocator throughout.
+pub fn init_page_allocator() {
+    static mut NODE: LockNode = LockNode::new();
+    let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
+    let page_alloc = &mut *lock;
 
-#[derive(Debug)]
-pub enum Error {
-    NoFreeBlocks,
+    let early_pages_range = kmem::early_pages_range();
+    if let Err(err) = page_alloc.mark_free(&early_pages_range) {
+        panic!("Couldn't mark early pages free: range: {} err: {:?}", early_pages_range, err);
+    }
 }
 
-impl FreeList {
-    pub fn put(&mut self, page: &mut Page4K) {
-        let ptr = (page as *mut Page4K).addr();
-        assert_eq!(ptr % PAGE_SIZE_4K, 0, "freeing unaligned page");
-        page.scribble();
-        let f = page as *mut Page4K as *mut FreeList;
-        unsafe {
-            ptr::write(f, FreeList { next: self.next });
-        }
-        self.next = ptr::NonNull::new(f);
-    }
+/// Free unused pages in mem that aren't covered by the memory map. Assumes
+/// that custom_map is sorted.
+pub fn free_unused_ranges<'a>(
+    available_mem: &PhysRange,
+    used_ranges: impl Iterator<Item = &'a PhysRange>,
+) -> Result<(), BitmapPageAllocError> {
+    static mut NODE: LockNode = LockNode::new();
+    let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
+    let page_alloc = &mut *lock;
 
-    pub fn get(&mut self) -> Result<&'static mut Page4K, Error> {
-        let mut next = self.next.ok_or(Error::NoFreeBlocks)?;
-        let next = unsafe { next.as_mut() };
-        self.next = next.next;
-        let pg = unsafe { &mut *(next as *mut FreeList as *mut Page4K) };
-        pg.clear();
-        Ok(pg)
-    }
+    page_alloc.free_unused_ranges(available_mem, used_ranges)
 }
 
-pub unsafe fn free_pages(pages: &mut [Page4K]) {
+/// Try to allocate a page
+pub fn allocate() -> Result<&'static mut Page4K, BitmapPageAllocError> {
     static mut NODE: LockNode = LockNode::new();
-    let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) });
-    let fl = &mut *lock;
-    for page in pages.iter_mut() {
-        fl.put(page);
+    let mut lock = PAGE_ALLOC.lock(unsafe { &NODE });
+    let page_alloc = &mut *lock;
+
+    match page_alloc.allocate() {
+        Ok(page_pa) => Ok(unsafe { &mut *physaddr_as_ptr_mut::<Page4K>(page_pa) }),
+        Err(err) => Err(err),
     }
 }
 
-pub fn alloc() -> Result<&'static mut Page4K, Error> {
+pub fn usage_bytes() -> (usize, usize) {
     static mut NODE: LockNode = LockNode::new();
-    let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) });
-    let fl = &mut *lock;
-    fl.get()
+    let mut lock = PAGE_ALLOC.lock(unsafe { &NODE });
+    let page_alloc = &mut *lock;
+    page_alloc.usage_bytes()
}
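The three public entry points above are meant to be called in the phase order the module comment describes. A minimal sketch of that sequence (`available` and `used` are placeholder values standing in for the mailbox-reported memory and the kernel's memory map, not identifiers from this diff):

    kalloc::init_page_allocator();                        // phase 1: only the early pagetable pages are free
    kalloc::free_unused_ranges(&available, used.iter())   // phase 2: free everything not in the memory map
        .expect("free_unused_ranges failed");
    let page = kalloc::allocate().expect("out of pages"); // normal page allocation from here on
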
diff --git a/aarch64/src/kmem.rs b/aarch64/src/kmem.rs
index 14e8cef..e7a7a28 100644
--- a/aarch64/src/kmem.rs
+++ b/aarch64/src/kmem.rs
@@ -1,7 +1,5 @@
-use port::mem::PhysAddr;
-
-use crate::{param::KZERO, vm::Page4K};
-use core::{mem, slice};
+use crate::param::KZERO;
+use port::mem::{PhysAddr, PhysRange};
 
 // These map to definitions in kernel.ld
 extern "C" {
@@ -52,20 +50,9 @@ pub fn from_ptr_to_physaddr<T>(a: *const T) -> PhysAddr {
     from_virt_to_physaddr(a.addr())
 }
 
-unsafe fn page_slice_mut<'a>(pstart: *mut Page4K, pend: *mut Page4K) -> &'a mut [Page4K] {
-    let ustart = pstart.addr();
-    let uend = pend.addr();
-    const PAGE_SIZE: usize = mem::size_of::<Page4K>();
-    assert_eq!(ustart % PAGE_SIZE, 0, "page_slice_mut: unaligned start page");
-    assert_eq!(uend % PAGE_SIZE, 0, "page_slice_mut: unaligned end page");
-    assert!(ustart < uend, "page_slice_mut: bad range");
-
-    let len = (uend - ustart) / PAGE_SIZE;
-    unsafe { slice::from_raw_parts_mut(ustart as *mut Page4K, len) }
-}
-
-pub fn early_pages() -> &'static mut [Page4K] {
-    let early_start = early_pagetables_addr() as *mut Page4K;
-    let early_end = eearly_pagetables_addr() as *mut Page4K;
-    unsafe { page_slice_mut(early_start, early_end) }
+pub fn early_pages_range() -> PhysRange {
+    PhysRange::new(
+        from_virt_to_physaddr(early_pagetables_addr()),
+        from_virt_to_physaddr(eearly_pagetables_addr()),
+    )
 }
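`kalloc::allocate` above relies on `kmem::physaddr_as_ptr_mut`, which already exists in kmem.rs and is untouched by this diff. Presumably it is roughly the inverse of `from_virt_to_physaddr`; a sketch of the assumed shape (not the actual definition):

    pub fn physaddr_as_ptr_mut<T>(pa: PhysAddr) -> *mut T {
        // Assumes physical memory is mapped at the fixed KZERO offset.
        (pa.addr() as usize + KZERO) as *mut T
    }
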
diff --git a/aarch64/src/mailbox.rs b/aarch64/src/mailbox.rs
index 587c2df..51ed579 100644
--- a/aarch64/src/mailbox.rs
+++ b/aarch64/src/mailbox.rs
@@ -4,7 +4,7 @@ use core::mem;
 use core::mem::MaybeUninit;
 use port::fdt::DeviceTree;
 use port::mcslock::{Lock, LockNode};
-use port::mem::VirtRange;
+use port::mem::{PhysAddr, PhysRange, VirtRange};
 
 const MBOX_READ: usize = 0x00;
 const MBOX_STATUS: usize = 0x18;
@@ -191,7 +191,7 @@ pub struct MemoryInfo {
     pub end: u32,
 }
 
-pub fn get_arm_memory() -> MemoryInfo {
+pub fn get_arm_memory() -> PhysRange {
     let tags = Tag:: {
         tag_id0: TagId::GetArmMemory,
         tag_buffer_size0: 12,
@@ -204,10 +204,10 @@ pub fn get_arm_memory() -> MemoryInfo {
     let size = res.size;
     let end = start + size;
 
-    MemoryInfo { start, size, end }
+    PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64))
 }
 
-pub fn get_vc_memory() -> MemoryInfo {
+pub fn get_vc_memory() -> PhysRange {
     let tags = Tag:: {
         tag_id0: TagId::GetVcMemory,
         tag_buffer_size0: 12,
@@ -220,7 +220,7 @@ pub fn get_vc_memory() -> MemoryInfo {
     let size = res.size;
     let end = start + size;
 
-    MemoryInfo { start, size, end }
+    PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64))
 }
 
 pub fn get_firmware_revision() -> u32 {
diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs
index 4e10056..bde7fd8 100644
--- a/aarch64/src/main.rs
+++ b/aarch64/src/main.rs
@@ -5,6 +5,7 @@
 #![feature(alloc_error_handler)]
 #![feature(asm_const)]
 #![feature(core_intrinsics)]
+#![feature(inline_const)]
 #![feature(stdsimd)]
 #![feature(strict_provenance)]
 #![forbid(unsafe_op_in_unsafe_fn)]
@@ -39,7 +40,7 @@ unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_v
     let start = start as *const _ as u64;
     let end = end as *const _ as u64;
     let size = end - start;
-    println!("  {name}{start:#x}-{end:#x} ({size:#x})");
+    println!("  {name}{start:#x}..{end:#x} ({size:#x})");
 }
 
 fn print_binary_sections() {
@@ -67,12 +68,17 @@ fn print_binary_sections() {
     }
 }
 
-fn print_physical_memory_map() {
+fn print_memory_info() {
     println!("Physical memory map:");
-    let mailbox::MemoryInfo { start, size, end } = mailbox::get_arm_memory();
-    println!("  Memory:\t{start:#018x}-{end:#018x} ({size:#x})");
-    let mailbox::MemoryInfo { start, size, end } = mailbox::get_vc_memory();
-    println!("  Video:\t{start:#018x}-{end:#018x} ({size:#x})");
+    let arm_mem = mailbox::get_arm_memory();
+    println!("  Memory:\t{arm_mem} ({:#x})", arm_mem.size());
+    let vc_mem = mailbox::get_vc_memory();
+    println!("  Video:\t{vc_mem} ({:#x})", vc_mem.size());
+
+    println!("Memory usage:");
+    let (used, total) = kalloc::usage_bytes();
+    println!("  Used:\t\t{used:#016x}");
+    println!("  Total:\t{total:#016x}");
 }
 
 // https://github.com/raspberrypi/documentation/blob/develop/documentation/asciidoc/computers/raspberry-pi/revision-codes.adoc
@@ -121,15 +127,13 @@ pub extern "C" fn main9(dtb_va: usize) {
 
     // Map address space accurately using rust VM code to manage page tables
     unsafe {
-        kalloc::free_pages(kmem::early_pages());
-
         let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size());
-        vm::init(&dt, &mut *ptr::addr_of_mut!(KPGTBL), dtb_range);
+        vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, mailbox::get_arm_memory());
         vm::switch(&*ptr::addr_of!(KPGTBL));
     }
 
     print_binary_sections();
-    print_physical_memory_map();
+    print_memory_info();
     print_board_info();
 
     kernel_root().print_recursive_tables();
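The `{arm_mem}` and `{vc_mem}` interpolations in `print_memory_info` work because this diff also adds a `Display` impl for `PhysRange` (see port/src/mem.rs below). A standalone illustration of the resulting format, using a made-up range in hosted test code:

    let r = PhysRange::with_end(0x0, 0x3b40_0000);
    assert_eq!(format!("{r}"), "0x00000000000000..0x0000003b400000");
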
diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs
index b3e8428..da2fec4 100644
--- a/aarch64/src/vm.rs
+++ b/aarch64/src/vm.rs
@@ -13,7 +13,7 @@ use core::fmt;
 use core::ptr::write_volatile;
 use num_enum::{FromPrimitive, IntoPrimitive};
 use port::{
-    fdt::DeviceTree,
+    bitmapalloc::BitmapPageAllocError,
     mem::{PhysAddr, PhysRange, PAGE_SIZE_1G, PAGE_SIZE_2M, PAGE_SIZE_4K},
 };
 
@@ -48,12 +48,6 @@ impl Page4K {
             core::intrinsics::volatile_set_memory(&mut self.0, 0u8, 1);
         }
     }
-
-    pub fn scribble(&mut self) {
-        unsafe {
-            core::intrinsics::volatile_set_memory(self, 0b1010_1010u8, 1);
-        }
-    }
 }
 
 #[derive(Debug, IntoPrimitive, FromPrimitive)]
@@ -79,7 +73,7 @@ pub enum AccessPermission {
 pub enum Shareable {
     #[num_enum(default)]
     Non = 0,           // Non-shareable (single core)
-    Unpredictable = 1, // Unpredicatable!
+    Unpredictable = 1, // Unpredictable!
     Outer = 2,         // Outer shareable (shared across CPUs, GPU)
     Inner = 3,         // Inner shareable (shared across CPUs)
 }
@@ -280,13 +274,13 @@ fn recursive_table_addr(va: usize, level: Level) -> usize {
 
 #[derive(Debug)]
 pub enum PageTableError {
-    AllocationFailed(kalloc::Error),
+    AllocationFailed(BitmapPageAllocError),
     EntryIsNotTable,
     PhysRangeIsZero,
 }
 
-impl From<kalloc::Error> for PageTableError {
-    fn from(err: kalloc::Error) -> PageTableError {
+impl From<BitmapPageAllocError> for PageTableError {
+    fn from(err: BitmapPageAllocError) -> PageTableError {
         PageTableError::AllocationFailed(err)
     }
 }
@@ -331,7 +325,7 @@ impl Table {
 }
 
 fn alloc_pagetable() -> Result<&'static mut Table, PageTableError> {
-    let page = kalloc::alloc()?;
+    let page = kalloc::allocate()?;
     page.clear();
     Ok(unsafe { &mut *(page as *mut Page4K as *mut Table) })
 }
@@ -471,7 +465,9 @@ fn print_pte(indent: usize, i: usize, level: Level, pte: Entry) {
     }
 }
 
-pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: PhysRange) {
+pub unsafe fn init(kpage_table: &mut PageTable, dtb_range: PhysRange, available_mem: PhysRange) {
+    kalloc::init_page_allocator();
+
     // We use recursive page tables, but we have to be careful in the init call,
     // since the kpage_table is not currently pointed to by ttbr1_el1. Any
     // recursive addressing of (511, 511, 511, 511) always points to the
@@ -519,8 +515,13 @@ pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: Phy
     for (name, range, flags, page_size) in custom_map.iter() {
         let mapped_range =
             kpage_table.map_phys_range(range, *flags, *page_size).expect("init mapping failed");
+
+        // if let Err(err) = kalloc::mark_allocated(range) {
+        //     panic!("Couldn't mark range allocated: range: {} err: {:?}", range, err);
+        // }
+
         println!(
-            "  {:14}{:#018x}-{:#018x} to {:#018x}-{:#018x} flags: {:?} page_size: {:?}",
+            "  {:14}{:#018x}..{:#018x} to {:#018x}..{:#018x} flags: {:?} page_size: {:?}",
             name,
             range.start().addr(),
             range.end().addr(),
@@ -530,6 +531,10 @@ pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: Phy
             page_size
         );
     }
+
+    if let Err(err) = kalloc::free_unused_ranges(&available_mem, custom_map.map(|m| m.1).iter()) {
+        panic!("Couldn't mark unused pages as free: err: {:?}", err);
+    }
 }
 
 /// Return the root kernel page table physical address
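For concreteness, the inversion `free_unused_ranges` performs over the custom map, with illustrative addresses rather than the real kernel map:

    let available = PhysRange::with_end(0x0, 0x4000_0000);
    let used = [
        PhysRange::with_end(0x0, 0x8_0000),            // e.g. kernel image
        PhysRange::with_end(0x3000_0000, 0x3100_0000), // e.g. device tree blob
    ];
    // Frees the gaps 0x80000..0x30000000 and 0x31000000..0x40000000, then marks
    // everything above 0x40000000 (the allocator's new end) as allocated.
    kalloc::free_unused_ranges(&available, used.iter()).expect("free_unused_ranges failed");
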
diff --git a/port/src/bitmapalloc.rs b/port/src/bitmapalloc.rs
new file mode 100644
index 0000000..7433213
--- /dev/null
+++ b/port/src/bitmapalloc.rs
@@ -0,0 +1,260 @@
+use crate::mem::{PhysAddr, PhysRange};
+
+struct Bitmap<const SIZE: usize> {
+    bytes: [u8; SIZE],
+}
+
+impl<const SIZE: usize> Bitmap<SIZE> {
+    pub const fn new(init_value: u8) -> Self {
+        Self { bytes: [init_value; SIZE] }
+    }
+
+    #[allow(dead_code)]
+    pub fn is_set(&self, i: usize) -> bool {
+        let byteidx = i / 8;
+        let bitidx = i % 8;
+        let byte = self.bytes[byteidx];
+        byte & (1 << bitidx) > 0
+    }
+
+    pub fn set(&mut self, i: usize, b: bool) {
+        let byteidx = i / 8;
+        let bitidx = i % 8;
+        if b {
+            self.bytes[byteidx] |= 1 << bitidx;
+        } else {
+            self.bytes[byteidx] &= !(1 << bitidx);
+        }
+    }
+}
+
+#[derive(Debug)]
+pub enum BitmapPageAllocError {
+    OutOfBounds,
+    MisalignedAddr,
+    OutOfSpace,
+}
+
+/// Allocator where each page is represented by a single bit.
+/// 0: free, 1: allocated
+/// `end` is used to indicate the extent of the memory. Anything beyond this
+/// will be marked as allocated.
+pub struct BitmapPageAlloc<const NUM_BITMAPS: usize, const BITMAP_SIZE: usize> {
+    bitmaps: [Bitmap<BITMAP_SIZE>; NUM_BITMAPS],
+    alloc_page_size: usize, // Size of pages represented by single bit
+    end: PhysAddr,          // Upper bound of physical memory
+}
+
+impl<const NUM_BITMAPS: usize, const BITMAP_SIZE: usize> BitmapPageAlloc<NUM_BITMAPS, BITMAP_SIZE> {
+    pub const fn new_all_allocated(alloc_page_size: usize) -> Self {
+        let end = PhysAddr::new((NUM_BITMAPS * BITMAP_SIZE * 8 * alloc_page_size) as u64);
+        Self {
+            bitmaps: [const { Bitmap::<BITMAP_SIZE>::new(0xff) }; NUM_BITMAPS],
+            alloc_page_size,
+            end,
+        }
+    }
+
+    /// Returns number of physical bytes a single bitmap can cover.
+    const fn bytes_per_bitmap(&self) -> usize {
+        BITMAP_SIZE * 8 * self.alloc_page_size
+    }
+
+    /// Mark the bits corresponding to the given physical range as allocated,
+    /// regardless of the existing state.
+    pub fn mark_allocated(&mut self, range: &PhysRange) -> Result<(), BitmapPageAllocError> {
+        self.mark_range(range, true, true)
+    }
+
+    /// Mark the bits corresponding to the given physical range as free,
+    /// regardless of the existing state.
+    pub fn mark_free(&mut self, range: &PhysRange) -> Result<(), BitmapPageAllocError> {
+        self.mark_range(range, false, true)
+    }
+
+    /// Free unused pages in mem that aren't covered by the memory map. Assumes
+    /// that custom_map is sorted and that available_mem can be used to set the
+    /// upper bound of the allocator.
+    pub fn free_unused_ranges<'a>(
+        &mut self,
+        available_mem: &PhysRange,
+        used_ranges: impl Iterator<Item = &'a PhysRange>,
+    ) -> Result<(), BitmapPageAllocError> {
+        let mut next_start = available_mem.start();
+        for range in used_ranges {
+            if next_start < range.0.start {
+                self.mark_free(&PhysRange::new(next_start, range.0.start))?;
+            }
+            if next_start < range.0.end {
+                next_start = range.0.end;
+            }
+        }
+        if next_start < available_mem.end() {
+            self.mark_free(&PhysRange::new(next_start, available_mem.end()))?;
+        }
+
+        self.end = available_mem.0.end;
+
+        // Mark everything past the end point as allocated
+        let end_range = PhysRange::new(
+            self.end,
+            PhysAddr::new((self.bytes_per_bitmap() * self.bitmaps.len()) as u64),
+        );
+        self.mark_range(&end_range, true, false)?;
+
+        Ok(())
+    }
+
+    /// Try to allocate the next available page.
+    /// TODO Add marker to last allocated and use that
+    pub fn allocate(&mut self) -> Result<PhysAddr, BitmapPageAllocError> {
+        let mut num_pages_remaining = self.end.addr() as usize / self.alloc_page_size;
+        for bitmapidx in 0..self.bitmaps.len() {
+            let bitmap = &mut self.bitmaps[bitmapidx];
+            for byteidx in 0..bitmap.bytes.len() {
+                let byte = &mut bitmap.bytes[byteidx];
+                if *byte != 0xff {
+                    let num_leading_ones = byte.trailing_ones() as usize;
+                    let bit = 1 << num_leading_ones;
+                    *byte |= bit;
+
+                    let pa = ((bitmapidx * self.bytes_per_bitmap())
+                        + (byteidx * 8 * self.alloc_page_size)
+                        + (num_leading_ones * self.alloc_page_size))
+                        as u64;
+                    return Ok(PhysAddr::new(pa));
+                }
+
+                if num_pages_remaining > 8 {
+                    num_pages_remaining -= 8;
+                } else {
+                    num_pages_remaining = 0;
+                }
+            }
+        }
+        Err(BitmapPageAllocError::OutOfSpace)
+    }
+
+    /// Calculate the available bytes and return along with the total.
+    /// Returns (used, total)
+    pub fn usage_bytes(&self) -> (usize, usize) {
+        // We count free because the last bits might be marked partially 'allocated'
+        // if the end comes in the middle of a byte in the bitmap.
+        let mut free_bytes: usize = 0;
+        for byte in self.bytes() {
+            free_bytes += byte.count_zeros() as usize * self.alloc_page_size;
+        }
+        let total = self.end.0 as usize;
+        (total - free_bytes, total)
+    }
+
+    fn mark_range(
+        &mut self,
+        range: &PhysRange,
+        mark_allocated: bool,
+        check_end: bool,
+    ) -> Result<(), BitmapPageAllocError> {
+        if check_end && range.0.end > self.end {
+            return Err(BitmapPageAllocError::OutOfBounds);
+        }
+
+        let bytes_per_bitmap = self.bytes_per_bitmap();
+        for pa in range.step_by_rounded(self.alloc_page_size) {
+            let bitmap_idx = pa.addr() as usize / bytes_per_bitmap;
+            if bitmap_idx >= self.bitmaps.len() {
+                return Err(BitmapPageAllocError::OutOfBounds);
+            }
+
+            let offset_into_bitmap = pa.addr() as usize % bytes_per_bitmap;
+            let bitmap_byte_idx = offset_into_bitmap / self.alloc_page_size;
+
+            let bitmap = &mut self.bitmaps[bitmap_idx];
+            bitmap.set(bitmap_byte_idx, mark_allocated);
+        }
+        Ok(())
+    }
+
+    /// Iterate over each of the bytes in turn. Iterates only over the bytes
+    /// covering pages up to `end`. If `end` is within one of the bytes, that
+    /// byte will be returned.
+    fn bytes(&self) -> impl Iterator<Item = u8> + '_ {
+        let mut bitmapidx = 0;
+        let mut byteidx = 0;
+        let mut currpa = PhysAddr::new(0);
+        core::iter::from_fn(move || {
+            let byte = self.bitmaps[bitmapidx].bytes[byteidx];
+            byteidx += 1;
+            if byteidx >= BITMAP_SIZE {
+                byteidx = 0;
+                bitmapidx += 1;
+                if bitmapidx >= self.bitmaps.len() || currpa >= self.end {
+                    return None;
+                }
+                currpa.0 += self.alloc_page_size as u64;
+            }
+            Some(byte)
+        })
+    }
+}
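A quick test-style sketch of the address arithmetic in `allocate`, using the same 2-bitmap configuration as the tests below (values chosen purely for illustration):

    let mut alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096);
    alloc.mark_free(&PhysRange::with_end(0x0, 0x10_000)).unwrap(); // pages 0..16 free
    // First free bit: bitmaps[0].bytes[0], bit 0 -> pa = 0*bytes_per_bitmap + 0*8*4096 + 0*4096
    assert_eq!(alloc.allocate().unwrap(), PhysAddr::new(0x0));
    // Next free bit: bit 1 of the same byte -> pa = 1 * 4096
    assert_eq!(alloc.allocate().unwrap(), PhysAddr::new(0x1000));
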
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn bitmap_new() {
+        let bitmap = Bitmap::<4096>::new(0);
+        for byte in bitmap.bytes {
+            assert_eq!(byte, 0x00);
+        }
+    }
+
+    #[test]
+    fn bitmap_set() {
+        let mut bitmap = Bitmap::<4096>::new(0);
+        assert!(!bitmap.is_set(0));
+        bitmap.set(0, true);
+        assert!(bitmap.is_set(0));
+
+        // Assert only this bit is set
+        assert_eq!(bitmap.bytes[0], 1);
+        for i in 1..bitmap.bytes.len() {
+            assert_eq!(bitmap.bytes[i], 0);
+        }
+    }
+
+    #[test]
+    fn bitmappagealloc_allocate_and_deallocate() {
+        // Create a new allocator and mark it all freed
+        let mut alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096);
+        assert!(alloc
+            .mark_free(&PhysRange::with_end(0, 2 * alloc.bytes_per_bitmap() as u64))
+            .is_ok());
+
+        // Mark a range as allocated - 10 bits
+        assert!(alloc.mark_allocated(&PhysRange::with_end(0x1000, 0xb000)).is_ok());
+
+        for (i, byte) in alloc.bytes().enumerate() {
+            if i == 0 {
+                assert_eq!(byte, 0xfe);
+            } else if i == 1 {
+                assert_eq!(byte, 0x07);
+            } else {
+                assert_eq!(byte, 0x00);
+            }
+        }
+
+        // Deallocate a range
+        assert!(alloc.mark_free(&PhysRange::with_end(0x1000, 0x4000)).is_ok());
+
+        for (i, byte) in alloc.bytes().enumerate() {
+            if i == 0 {
+                assert_eq!(byte, 0xf0);
+            } else if i == 1 {
+                assert_eq!(byte, 0x07);
+            } else {
+                assert_eq!(byte, 0x00);
+            }
+        }
+    }
+}
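To make the indexing in `mark_range` and `Bitmap::set` concrete, a worked example with alloc_page_size = 4096 and BITMAP_SIZE = 4096, so each bitmap covers 4096 * 8 * 4096 bytes = 128 MiB:

    // pa = 0x5000 is page 5:
    // bitmap_idx      = 0x5000 / 0x0800_0000           = 0
    // bitmap_byte_idx = (0x5000 % 0x0800_0000) / 4096  = 5   (a bit/page index, despite the name)
    // Bitmap::set(5, ...) then updates bytes[5 / 8] = bytes[0], bit 5 % 8 = 5.
    // The first address handled by bitmaps[1] is therefore 0x0800_0000.
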
diff --git a/port/src/lib.rs b/port/src/lib.rs
index aca277a..9fa7e0d 100644
--- a/port/src/lib.rs
+++ b/port/src/lib.rs
@@ -1,9 +1,11 @@
 #![allow(clippy::upper_case_acronyms)]
 #![cfg_attr(not(any(test)), no_std)]
+#![feature(inline_const)]
 #![feature(maybe_uninit_slice)]
 #![feature(step_trait)]
 #![forbid(unsafe_op_in_unsafe_fn)]
 
+pub mod bitmapalloc;
 pub mod dat;
 pub mod devcons;
 pub mod fdt;
diff --git a/port/src/mem.rs b/port/src/mem.rs
index d232c0c..06ab209 100644
--- a/port/src/mem.rs
+++ b/port/src/mem.rs
@@ -44,7 +44,7 @@ impl From<&RegBlock> for VirtRange {
 
 #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)]
 #[repr(transparent)]
-pub struct PhysAddr(u64);
+pub struct PhysAddr(pub u64);
 
 impl PhysAddr {
     pub const fn new(value: u64) -> Self {
@@ -104,6 +104,14 @@ impl fmt::Debug for PhysAddr {
 pub struct PhysRange(pub Range<PhysAddr>);
 
 impl PhysRange {
+    pub fn new(start: PhysAddr, end: PhysAddr) -> Self {
+        Self(start..end)
+    }
+
+    pub fn with_end(start: u64, end: u64) -> Self {
+        Self(PhysAddr(start)..PhysAddr(end))
+    }
+
     pub fn with_len(start: u64, len: usize) -> Self {
         Self(PhysAddr(start)..PhysAddr(start + len as u64))
     }
@@ -126,6 +134,10 @@ impl PhysRange {
         self.0.end
     }
 
+    pub fn size(&self) -> usize {
+        (self.0.end.addr() - self.0.start.addr()) as usize
+    }
+
     pub fn step_by_rounded(&self, step_size: usize) -> StepBy<Range<PhysAddr>> {
         let startpa = self.start().round_down(step_size as u64);
         let endpa = self.end().round_up(step_size as u64);
@@ -133,6 +145,13 @@ impl PhysRange {
     }
 }
 
+impl fmt::Display for PhysRange {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:#016x}..{:#016x}", self.0.start.addr(), self.0.end.addr())?;
+        Ok(())
+    }
+}
+
 impl From<&RegBlock> for PhysRange {
     fn from(r: &RegBlock) -> Self {
         let start = PhysAddr(r.addr);
@@ -140,32 +159,3 @@ impl From<&RegBlock> for PhysRange {
         PhysRange(start..end)
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn physaddr_step() {
-        let range = PhysRange(PhysAddr::new(4096)..PhysAddr::new(4096 * 3));
-        let pas = range.step_by_rounded(PAGE_SIZE_4K).collect::<Vec<PhysAddr>>();
-        assert_eq!(pas, [PhysAddr::new(4096), PhysAddr::new(4096 * 2)]);
-    }
-
-    #[test]
-    fn physaddr_step_rounds_up_and_down() {
-        // Start should round down to 8192
-        // End should round up to 16384
-        let range = PhysRange(PhysAddr::new(9000)..PhysAddr::new(5000 * 3));
-        let pas = range.step_by_rounded(PAGE_SIZE_4K).collect::<Vec<PhysAddr>>();
-        assert_eq!(pas, [PhysAddr::new(4096 * 2), PhysAddr::new(4096 * 3)]);
-    }
-
-    #[test]
-    fn physaddr_step_2m() {
-        let range =
-            PhysRange(PhysAddr::new(0x3f000000)..PhysAddr::new(0x3f000000 + 4 * 1024 * 1024));
-        let pas = range.step_by_rounded(PAGE_SIZE_2M).collect::<Vec<PhysAddr>>();
-        assert_eq!(pas, [PhysAddr::new(0x3f000000), PhysAddr::new(0x3f000000 + 2 * 1024 * 1024)]);
-    }
-}
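A small demonstration of the three `PhysRange` constructors and the new `size` accessor added above (toy addresses, hosted test code):

    let a = PhysRange::new(PhysAddr::new(0x1000), PhysAddr::new(0x3000));
    let b = PhysRange::with_end(0x1000, 0x3000);
    let c = PhysRange::with_len(0x1000, 0x2000);
    // All three describe the same two-page range.
    assert_eq!(a.size(), 0x2000);
    assert_eq!(b.start(), PhysAddr::new(0x1000));
    assert_eq!(c.end(), PhysAddr::new(0x3000));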