Simple bitmap page allocator
Replaces the freelist-based page allocator and can be used for the lifetime of the kernel.

Signed-off-by: Graham MacDonald <[email protected]>
gmacd committed Feb 1, 2024
1 parent f57b0cf commit 7a14ea6
Showing 8 changed files with 380 additions and 116 deletions.
90 changes: 53 additions & 37 deletions aarch64/src/kalloc.rs
@@ -1,57 +1,73 @@
/// This module acts as an interface between the portable allocator and the
/// arch-specific use of it.
///
/// The page allocator is constructed and finalised in a number of phases:
/// 1. `init_page_allocator` to create a fixed size allocator assuming everything
/// is in use except a small number of statically defined pages available for
/// setting up the initial page tables.
/// 2. `free_unused_ranges` to mark available ranges as the inverse of the
/// physical memory map within the bounds of the available memory.
use crate::kmem;
use crate::kmem::physaddr_as_ptr_mut;
use crate::vm::Page4K;
use core::ptr;
use port::bitmapalloc::BitmapPageAlloc;
use port::bitmapalloc::BitmapPageAllocError;
use port::mem::PhysRange;
use port::{
mcslock::{Lock, LockNode},
mem::PAGE_SIZE_4K,
};

static FREE_LIST: Lock<FreeList> = Lock::new("kmem", FreeList { next: None });
/// Set up bitmap page allocator assuming everything is allocated.
static PAGE_ALLOC: Lock<BitmapPageAlloc<16, PAGE_SIZE_4K>> = Lock::new(
"page_alloc",
const { BitmapPageAlloc::<16, PAGE_SIZE_4K>::new_all_allocated(PAGE_SIZE_4K) },
);

#[repr(align(4096))]
struct FreeList {
next: Option<ptr::NonNull<FreeList>>,
}
unsafe impl Send for FreeList {}
/// The bitmap allocator has all pages marked as allocated initially. We'll
/// add some pages (mark free) to allow us to set up the page tables and build
/// a memory map. Once the memory map has been built, we can mark all the unused
/// space as available. This allows us to use only one page allocator throughout.
pub fn init_page_allocator() {
static mut NODE: LockNode = LockNode::new();
let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
let page_alloc = &mut *lock;

#[derive(Debug)]
pub enum Error {
NoFreeBlocks,
let early_pages_range = kmem::early_pages_range();
if let Err(err) = page_alloc.mark_free(&early_pages_range) {
panic!("Couldn't mark early pages free: range: {} err: {:?}", early_pages_range, err);
}
}

impl FreeList {
pub fn put(&mut self, page: &mut Page4K) {
let ptr = (page as *mut Page4K).addr();
assert_eq!(ptr % PAGE_SIZE_4K, 0, "freeing unaligned page");
page.scribble();
let f = page as *mut Page4K as *mut FreeList;
unsafe {
ptr::write(f, FreeList { next: self.next });
}
self.next = ptr::NonNull::new(f);
}
/// Free unused pages in mem that aren't covered by the memory map. Assumes
/// that custom_map is sorted.
pub fn free_unused_ranges<'a>(
available_mem: &PhysRange,
used_ranges: impl Iterator<Item = &'a PhysRange>,
) -> Result<(), BitmapPageAllocError> {
static mut NODE: LockNode = LockNode::new();
let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
let page_alloc = &mut *lock;

pub fn get(&mut self) -> Result<&'static mut Page4K, Error> {
let mut next = self.next.ok_or(Error::NoFreeBlocks)?;
let next = unsafe { next.as_mut() };
self.next = next.next;
let pg = unsafe { &mut *(next as *mut FreeList as *mut Page4K) };
pg.clear();
Ok(pg)
}
page_alloc.free_unused_ranges(available_mem, used_ranges)
}

pub unsafe fn free_pages(pages: &mut [Page4K]) {
/// Try to allocate a page
pub fn allocate() -> Result<&'static mut Page4K, BitmapPageAllocError> {
static mut NODE: LockNode = LockNode::new();
let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) });
let fl = &mut *lock;
for page in pages.iter_mut() {
fl.put(page);
let mut lock = PAGE_ALLOC.lock(unsafe { &NODE });
let page_alloc = &mut *lock;

match page_alloc.allocate() {
Ok(page_pa) => Ok(unsafe { &mut *physaddr_as_ptr_mut::<Page4K>(page_pa) }),
Err(err) => Err(err),
}
}

pub fn alloc() -> Result<&'static mut Page4K, Error> {
pub fn usage_bytes() -> (usize, usize) {
static mut NODE: LockNode = LockNode::new();
let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) });
let fl = &mut *lock;
fl.get()
let mut lock = PAGE_ALLOC.lock(unsafe { &NODE });
let page_alloc = &mut *lock;
page_alloc.usage_bytes()
}
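
For context, `BitmapPageAlloc` itself lives in `port/src/bitmapalloc.rs`, one of the 8 changed files but not rendered on this page. The sketch below is an assumption inferred from the call sites above (`new_all_allocated`, `mark_free`, `allocate`, `usage_bytes`), not the actual implementation; with the parameters used here, 16 bitmaps of 4096 bytes each would track 16 × 4096 × 8 = 524,288 pages, i.e. 2 GiB of 4 KiB pages.

// Hypothetical sketch of a const-constructible bitmap page allocator.
// One bit per page; a set bit means "allocated". Not the real
// port::bitmapalloc code, which isn't shown on this page.
pub struct BitmapPageAllocSketch<const NBITMAPS: usize, const BITMAP_BYTES: usize> {
    bitmaps: [[u8; BITMAP_BYTES]; NBITMAPS],
    page_size: usize,
}

impl<const N: usize, const B: usize> BitmapPageAllocSketch<N, B> {
    /// Everything starts allocated; ranges are freed explicitly later,
    /// matching the phased init described at the top of kalloc.rs.
    pub const fn new_all_allocated(page_size: usize) -> Self {
        Self { bitmaps: [[0xff; B]; N], page_size }
    }

    /// Claim the first free page and return its physical address.
    pub fn allocate(&mut self) -> Option<usize> {
        for (bm_i, bm) in self.bitmaps.iter_mut().enumerate() {
            for (byte_i, byte) in bm.iter_mut().enumerate() {
                if *byte != 0xff {
                    let bit = (!*byte).trailing_zeros() as usize;
                    *byte |= 1 << bit;
                    let page_idx = ((bm_i * B) + byte_i) * 8 + bit;
                    return Some(page_idx * self.page_size);
                }
            }
        }
        None
    }
}
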
27 changes: 7 additions & 20 deletions aarch64/src/kmem.rs
@@ -1,7 +1,5 @@
use port::mem::PhysAddr;

use crate::{param::KZERO, vm::Page4K};
use core::{mem, slice};
use crate::param::KZERO;
use port::mem::{PhysAddr, PhysRange};

// These map to definitions in kernel.ld
extern "C" {
@@ -52,20 +50,9 @@ pub fn from_ptr_to_physaddr<T>(a: *const T) -> PhysAddr {
from_virt_to_physaddr(a.addr())
}

unsafe fn page_slice_mut<'a>(pstart: *mut Page4K, pend: *mut Page4K) -> &'a mut [Page4K] {
let ustart = pstart.addr();
let uend = pend.addr();
const PAGE_SIZE: usize = mem::size_of::<Page4K>();
assert_eq!(ustart % PAGE_SIZE, 0, "page_slice_mut: unaligned start page");
assert_eq!(uend % PAGE_SIZE, 0, "page_slice_mut: unaligned end page");
assert!(ustart < uend, "page_slice_mut: bad range");

let len = (uend - ustart) / PAGE_SIZE;
unsafe { slice::from_raw_parts_mut(ustart as *mut Page4K, len) }
}

pub fn early_pages() -> &'static mut [Page4K] {
let early_start = early_pagetables_addr() as *mut Page4K;
let early_end = eearly_pagetables_addr() as *mut Page4K;
unsafe { page_slice_mut(early_start, early_end) }
pub fn early_pages_range() -> PhysRange {
PhysRange::new(
from_virt_to_physaddr(early_pagetables_addr()),
from_virt_to_physaddr(eearly_pagetables_addr()),
)
}
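
`PhysRange` and `PhysAddr` come from `port::mem` and aren't shown in this diff. A plausible minimal shape, inferred from the call sites here and in vm.rs (`new`, `with_len`, `start`, `end`, `size`) — an assumption, not the actual definition:

// Hypothetical shape of port::mem::PhysRange, inferred from call sites.
#[derive(Clone, Copy, Debug)]
pub struct PhysRange {
    start: PhysAddr, // inclusive
    end: PhysAddr,   // exclusive
}

impl PhysRange {
    pub fn new(start: PhysAddr, end: PhysAddr) -> Self {
        Self { start, end }
    }
    pub fn with_len(start: u64, len: usize) -> Self {
        Self::new(PhysAddr::new(start), PhysAddr::new(start + len as u64))
    }
    pub fn start(&self) -> PhysAddr { self.start }
    pub fn end(&self) -> PhysAddr { self.end }
    pub fn size(&self) -> usize {
        (self.end.addr() - self.start.addr()) as usize
    }
}

An end-exclusive range keeps `early_pages_range` a straight translation of the old `page_slice_mut` bounds: the start and end pointers become physical addresses and the length calculation moves behind `size()`.
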
10 changes: 5 additions & 5 deletions aarch64/src/mailbox.rs
@@ -4,7 +4,7 @@ use core::mem;
use core::mem::MaybeUninit;
use port::fdt::DeviceTree;
use port::mcslock::{Lock, LockNode};
use port::mem::VirtRange;
use port::mem::{PhysAddr, PhysRange, VirtRange};

const MBOX_READ: usize = 0x00;
const MBOX_STATUS: usize = 0x18;
@@ -191,7 +191,7 @@ pub struct MemoryInfo {
pub end: u32,
}

pub fn get_arm_memory() -> MemoryInfo {
pub fn get_arm_memory() -> PhysRange {
let tags = Tag::<EmptyRequest> {
tag_id0: TagId::GetArmMemory,
tag_buffer_size0: 12,
@@ -204,10 +204,10 @@ pub fn get_arm_memory() -> MemoryInfo {
let size = res.size;
let end = start + size;

MemoryInfo { start, size, end }
PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64))
}

pub fn get_vc_memory() -> MemoryInfo {
pub fn get_vc_memory() -> PhysRange {
let tags = Tag::<EmptyRequest> {
tag_id0: TagId::GetVcMemory,
tag_buffer_size0: 12,
@@ -220,7 +220,7 @@ pub fn get_vc_memory() -> MemoryInfo {
let size = res.size;
let end = start + size;

MemoryInfo { start, size, end }
PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64))
}

pub fn get_firmware_revision() -> u32 {
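
Both getters now fold the firmware reply's (start, size) pair into an end-exclusive `PhysRange` rather than the bespoke `MemoryInfo` struct. A worked example with made-up values (illustrative only; real values come from the firmware):

// Hypothetical reply: a board reporting 1 GiB of ARM memory at address 0.
let (start, size): (u32, u32) = (0, 0x4000_0000);
let end = start + size;
let range = PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64));
assert_eq!(range.size(), 0x4000_0000);

Returning `PhysRange` lets `main9` pass `get_arm_memory()` straight to `vm::init` as the available memory, as the main.rs diff below shows.
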
24 changes: 14 additions & 10 deletions aarch64/src/main.rs
@@ -5,6 +5,7 @@
#![feature(alloc_error_handler)]
#![feature(asm_const)]
#![feature(core_intrinsics)]
#![feature(inline_const)]
#![feature(stdsimd)]
#![feature(strict_provenance)]
#![forbid(unsafe_op_in_unsafe_fn)]
@@ -39,7 +40,7 @@ unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_v
let start = start as *const _ as u64;
let end = end as *const _ as u64;
let size = end - start;
println!(" {name}{start:#x}-{end:#x} ({size:#x})");
println!(" {name}{start:#x}..{end:#x} ({size:#x})");
}

fn print_binary_sections() {
@@ -67,12 +68,17 @@ fn print_binary_sections() {
}
}

fn print_physical_memory_map() {
fn print_memory_info() {
println!("Physical memory map:");
let mailbox::MemoryInfo { start, size, end } = mailbox::get_arm_memory();
println!(" Memory:\t{start:#018x}-{end:#018x} ({size:#x})");
let mailbox::MemoryInfo { start, size, end } = mailbox::get_vc_memory();
println!(" Video:\t{start:#018x}-{end:#018x} ({size:#x})");
let arm_mem = mailbox::get_arm_memory();
println!(" Memory:\t{arm_mem} ({:#x})", arm_mem.size());
let vc_mem = mailbox::get_vc_memory();
println!(" Video:\t{vc_mem} ({:#x})", vc_mem.size());

println!("Memory usage::");
let (used, total) = kalloc::usage_bytes();
println!(" Used:\t\t{used:#016x}");
println!(" Total:\t{total:#016x}");
}

// https://github.com/raspberrypi/documentation/blob/develop/documentation/asciidoc/computers/raspberry-pi/revision-codes.adoc
@@ -121,15 +127,13 @@ pub extern "C" fn main9(dtb_va: usize) {

// Map address space accurately using rust VM code to manage page tables
unsafe {
kalloc::free_pages(kmem::early_pages());

let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size());
vm::init(&dt, &mut *ptr::addr_of_mut!(KPGTBL), dtb_range);
vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, mailbox::get_arm_memory());
vm::switch(&*ptr::addr_of!(KPGTBL));
}

print_binary_sections();
print_physical_memory_map();
print_memory_info();
print_board_info();

kernel_root().print_recursive_tables();
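
Given the format strings above, the boot report plausibly comes out like this (the values and the `PhysRange` display format are illustrative assumptions, not captured output):

Physical memory map:
  Memory:	0x0000000000000000..0x000000003c000000 (0x3c000000)
  Video:	0x000000003c000000..0x0000000040000000 (0x4000000)
Memory usage:
  Used:		0x00000000004000
  Total:	0x0000003c000000
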
33 changes: 19 additions & 14 deletions aarch64/src/vm.rs
@@ -13,7 +13,7 @@ use core::fmt;
use core::ptr::write_volatile;
use num_enum::{FromPrimitive, IntoPrimitive};
use port::{
fdt::DeviceTree,
bitmapalloc::BitmapPageAllocError,
mem::{PhysAddr, PhysRange, PAGE_SIZE_1G, PAGE_SIZE_2M, PAGE_SIZE_4K},
};

@@ -48,12 +48,6 @@ impl Page4K {
core::intrinsics::volatile_set_memory(&mut self.0, 0u8, 1);
}
}

pub fn scribble(&mut self) {
unsafe {
core::intrinsics::volatile_set_memory(self, 0b1010_1010u8, 1);
}
}
}

#[derive(Debug, IntoPrimitive, FromPrimitive)]
@@ -79,7 +73,7 @@ pub enum AccessPermission {
pub enum Shareable {
#[num_enum(default)]
Non = 0, // Non-shareable (single core)
Unpredictable = 1, // Unpredicatable!
Unpredictable = 1, // Unpredictable!
Outer = 2, // Outer shareable (shared across CPUs, GPU)
Inner = 3, // Inner shareable (shared across CPUs)
}
@@ -280,13 +274,13 @@ fn recursive_table_addr(va: usize, level: Level) -> usize {

#[derive(Debug)]
pub enum PageTableError {
AllocationFailed(kalloc::Error),
AllocationFailed(BitmapPageAllocError),
EntryIsNotTable,
PhysRangeIsZero,
}

impl From<kalloc::Error> for PageTableError {
fn from(err: kalloc::Error) -> PageTableError {
impl From<BitmapPageAllocError> for PageTableError {
fn from(err: BitmapPageAllocError) -> PageTableError {
PageTableError::AllocationFailed(err)
}
}
@@ -331,7 +325,7 @@ impl Table {
}

fn alloc_pagetable() -> Result<&'static mut Table, PageTableError> {
let page = kalloc::alloc()?;
let page = kalloc::allocate()?;
page.clear();
Ok(unsafe { &mut *(page as *mut Page4K as *mut Table) })
}
@@ -471,7 +465,9 @@ fn print_pte(indent: usize, i: usize, level: Level, pte: Entry) {
}
}

pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: PhysRange) {
pub unsafe fn init(kpage_table: &mut PageTable, dtb_range: PhysRange, available_mem: PhysRange) {
kalloc::init_page_allocator();

// We use recursive page tables, but we have to be careful in the init call,
// since the kpage_table is not currently pointed to by ttbr1_el1. Any
// recursive addressing of (511, 511, 511, 511) always points to the
@@ -519,8 +515,13 @@ pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: Phy
for (name, range, flags, page_size) in custom_map.iter() {
let mapped_range =
kpage_table.map_phys_range(range, *flags, *page_size).expect("init mapping failed");

// if let Err(err) = kalloc::mark_allocated(range) {
// panic!("Couldn't mark range allocated: range: {} err: {:?}", range, err);
// }

println!(
" {:14}{:#018x}-{:#018x} to {:#018x}-{:#018x} flags: {:?} page_size: {:?}",
" {:14}{:#018x}..{:#018x} to {:#018x}..{:#018x} flags: {:?} page_size: {:?}",
name,
range.start().addr(),
range.end().addr(),
Expand All @@ -530,6 +531,10 @@ pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: Phy
page_size
);
}

if let Err(err) = kalloc::free_unused_ranges(&available_mem, custom_map.map(|m| m.1).iter()) {
panic!("Couldn't mark unused pages as free: err: {:?}", err);
}
}

/// Return the root kernel page table physical address
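
The final hunk relies on `free_unused_ranges` walking the sorted `custom_map` ranges and freeing only the gaps between them, clipped to `available_mem`. A minimal sketch of that gap walk, assuming the `PhysRange` accessors sketched earlier and that the used ranges are sorted and fall within the available range (the real logic lives in `port::bitmapalloc`, not shown on this page):

// Hypothetical gap walk: free every page in `avail` not covered by the
// sorted, non-overlapping `used` ranges.
fn free_unused(
    avail: &PhysRange,
    used: impl Iterator<Item = PhysRange>,
    mut mark_free: impl FnMut(PhysRange),
) {
    let mut cursor = avail.start();
    for u in used {
        if u.start().addr() > cursor.addr() {
            // Gap before this used range: free [cursor, u.start).
            mark_free(PhysRange::new(cursor, u.start()));
        }
        if u.end().addr() > cursor.addr() {
            cursor = u.end();
        }
    }
    if cursor.addr() < avail.end().addr() {
        // Tail after the last used range.
        mark_free(PhysRange::new(cursor, avail.end()));
    }
}

The commented-out `mark_allocated` call in the hunk suggests the device ranges in `custom_map` may later be marked allocated explicitly rather than simply skipped here.
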
