diff --git a/aarch64/lib/kernel.ld b/aarch64/lib/kernel.ld index 4e1cd05..0be01b7 100644 --- a/aarch64/lib/kernel.ld +++ b/aarch64/lib/kernel.ld @@ -51,6 +51,12 @@ SECTIONS { } ebss = .; + heap = .; + .heap : ALIGN(4096) { + . = ALIGN(8*2097152); + } + eheap = .; + /* Reserve section for early pagetables. Align to 2MiB to allow us to map as a 2MiB page.Note that this won't be needed once we transition to recursive pagetables. diff --git a/aarch64/src/init.rs b/aarch64/src/init.rs new file mode 100644 index 0000000..ae05f8c --- /dev/null +++ b/aarch64/src/init.rs @@ -0,0 +1,143 @@ +use crate::devcons; +use crate::kmem; +use crate::kmem::from_virt_to_physaddr; +use crate::kmem::heap_virtrange; +use crate::mailbox; +use crate::pagealloc; +use crate::registers; +use crate::trap; +use crate::vm; +use crate::vm::kernel_root; +use crate::vm::PageTable; +use crate::vmalloc; +use alloc::boxed::Box; +use core::ptr; +use port::bumpalloc::Bump; +use port::fdt::DeviceTree; +use port::mem::PAGE_SIZE_4K; +use port::mem::{PhysRange, VirtRange}; +use port::println; + +static mut KPGTBL: PageTable = PageTable::empty(); + +unsafe fn print_memory_range(name: &str, range: VirtRange) { + let start = range.start(); + let end = range.end(); + let size = range.size(); + println!(" {name}{start:#x}..{end:#x} ({size:#x})"); +} + +fn print_binary_sections() { + println!("Binary sections:"); + unsafe { + print_memory_range("boottext:\t", kmem::boottext_virtrange()); + print_memory_range("text:\t\t", kmem::text_virtrange()); + print_memory_range("rodata:\t", kmem::rodata_virtrange()); + print_memory_range("data:\t\t", kmem::data_virtrange()); + print_memory_range("bss:\t\t", kmem::bss_virtrange()); + print_memory_range("heap:\t\t", kmem::heap_virtrange()); + print_memory_range("total:\t", kmem::total_virtrange()); + } +} + +fn print_physical_memory_info() { + println!("Physical memory map:"); + let arm_mem = mailbox::get_arm_memory(); + println!(" Memory:\t{arm_mem} ({:#x})", arm_mem.size()); + let vc_mem = mailbox::get_vc_memory(); + println!(" Video:\t{vc_mem} ({:#x})", vc_mem.size()); +} + +fn print_memory_info() { + println!("Memory usage:"); + let (used, total) = pagealloc::usage_bytes(); + println!(" Used:\t\t{used:#016x}"); + println!(" Total:\t{total:#016x}"); +} + +// https://github.com/raspberrypi/documentation/blob/develop/documentation/asciidoc/computers/raspberry-pi/revision-codes.adoc +fn print_pi_name(board_revision: u32) { + let name = match board_revision { + 0xa21041 => "Raspberry Pi 2B", + 0xa02082 => "Raspberry Pi 3B", + 0xb03115 => "Raspberry Pi 4B", + 0xa220a0 => "Raspberry Compute Module 3", + _ => "Unrecognised", + }; + println!(" Board Name:\t{name}"); +} + +fn print_board_info() { + println!("Board information:"); + let board_revision = mailbox::get_board_revision(); + print_pi_name(board_revision); + println!(" Board Rev:\t{board_revision:#010x}"); + let model = mailbox::get_board_model(); + println!(" Board Model:\t{model:#010x}"); + let serial = mailbox::get_board_serial(); + println!(" Serial Num:\t{serial:#010x}"); + let mailbox::MacAddress { a, b, c, d, e, f } = mailbox::get_board_macaddr(); + println!(" MAC Address:\t{a:02x}:{b:02x}:{c:02x}:{d:02x}:{e:02x}:{f:02x}"); + let fw_revision = mailbox::get_firmware_revision(); + println!(" Firmware Rev:\t{fw_revision:#010x}"); +} + +/// This function is concerned with preparing the system to the point where an +/// allocator can be set up and allocation is available. 
We can't assume +/// there's any allocator available when executing this function. +fn init_pre_allocator(dtb_va: usize) { + trap::init(); + + // Parse the DTB before we set up memory so we can correctly map it + let dt = unsafe { DeviceTree::from_usize(dtb_va).unwrap() }; + + // Set up uart so we can log as early as possible + mailbox::init(&dt); + devcons::init(&dt); + + println!(); + println!("r9 from the Internet"); + println!("DTB found at: {:#x}", dtb_va); + println!("midr_el1: {:?}", registers::MidrEl1::read()); + + print_binary_sections(); + print_physical_memory_info(); + print_board_info(); + + // Map address space accurately using rust VM code to manage page tables + unsafe { + let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size()); + vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, mailbox::get_arm_memory()); + vm::switch(&*ptr::addr_of!(KPGTBL)); + } +} + +pub fn init(dtb_va: usize) { + init_pre_allocator(dtb_va); + + static BUMP_ALLOC: Bump<{ 4 * PAGE_SIZE_4K }, PAGE_SIZE_4K> = Bump::new(0); + vmalloc::init(&BUMP_ALLOC, heap_virtrange()); + BUMP_ALLOC.print_status(); + + // From this point we can use the global allocator + + let _b = Box::new("ddododo"); + + print_memory_info(); + + kernel_root().print_recursive_tables(); + + println!("looping now"); + + // { + // let test = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 16) }); + // println!("test alloc: {:p}", test); + // let test2 = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 16) }); + // println!("test alloc: {:p}", test2); + // let test3 = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 4096) }); + // println!("test alloc: {:p}", test3); + // } + + #[allow(clippy::empty_loop)] + loop {} +} diff --git a/aarch64/src/kmem.rs b/aarch64/src/kmem.rs index e7a7a28..9c9efbe 100644 --- a/aarch64/src/kmem.rs +++ b/aarch64/src/kmem.rs @@ -1,36 +1,86 @@ use crate::param::KZERO; -use port::mem::{PhysAddr, PhysRange}; +use port::mem::{PhysAddr, PhysRange, VirtRange}; // These map to definitions in kernel.ld extern "C" { + static boottext: [u64; 0]; + static eboottext: [u64; 0]; + static text: [u64; 0]; static etext: [u64; 0]; + static rodata: [u64; 0]; static erodata: [u64; 0]; + static data: [u64; 0]; + static edata: [u64; 0]; + static bss: [u64; 0]; static ebss: [u64; 0]; static early_pagetables: [u64; 0]; static eearly_pagetables: [u64; 0]; + static heap: [u64; 0]; + static eheap: [u64; 0]; + static end: [u64; 0]; } -pub fn text_addr() -> usize { - 0xffff_8000_0000_0000 +fn start_addr() -> usize { + unsafe { boottext.as_ptr().addr() } } -pub fn etext_addr() -> usize { +fn end_addr() -> usize { + unsafe { end.as_ptr().addr() } +} + +fn boottext_addr() -> usize { + unsafe { boottext.as_ptr().addr() } +} + +fn eboottext_addr() -> usize { + unsafe { eboottext.as_ptr().addr() } +} + +fn text_addr() -> usize { + unsafe { text.as_ptr().addr() } +} + +fn etext_addr() -> usize { unsafe { etext.as_ptr().addr() } } -pub fn erodata_addr() -> usize { +fn rodata_addr() -> usize { + unsafe { rodata.as_ptr().addr() } +} + +fn erodata_addr() -> usize { unsafe { erodata.as_ptr().addr() } } -pub fn ebss_addr() -> usize { +fn data_addr() -> usize { + unsafe { data.as_ptr().addr() } +} + +fn edata_addr() -> usize { + unsafe { edata.as_ptr().addr() } +} + +fn bss_addr() -> usize { + unsafe { bss.as_ptr().addr() } +} + +fn ebss_addr() -> usize { unsafe { ebss.as_ptr().addr() } } -pub fn early_pagetables_addr() -> usize { +fn heap_addr() -> usize { + unsafe { 
heap.as_ptr().addr() } +} + +fn eheap_addr() -> usize { + unsafe { eheap.as_ptr().addr() } +} + +fn early_pagetables_addr() -> usize { unsafe { early_pagetables.as_ptr().addr() } } -pub fn eearly_pagetables_addr() -> usize { +fn eearly_pagetables_addr() -> usize { unsafe { eearly_pagetables.as_ptr().addr() } } @@ -50,7 +100,51 @@ pub fn from_ptr_to_physaddr(a: *const T) -> PhysAddr { from_virt_to_physaddr(a.addr()) } -pub fn early_pages_range() -> PhysRange { +pub fn kernel_text_physrange() -> PhysRange { + PhysRange(from_virt_to_physaddr(text_addr())..from_virt_to_physaddr(etext_addr())) +} + +pub fn kernel_data_physrange() -> PhysRange { + PhysRange::with_len(from_virt_to_physaddr(etext_addr()).addr(), erodata_addr() - etext_addr()) +} + +pub fn kernel_bss_physrange() -> PhysRange { + PhysRange::with_len(from_virt_to_physaddr(erodata_addr()).addr(), ebss_addr() - erodata_addr()) +} + +pub fn kernel_heap_physrange() -> PhysRange { + PhysRange::with_len(from_virt_to_physaddr(heap_addr()).addr(), eheap_addr() - heap_addr()) +} + +pub fn total_virtrange() -> VirtRange { + VirtRange(start_addr()..end_addr()) +} + +pub fn boottext_virtrange() -> VirtRange { + VirtRange(boottext_addr()..eboottext_addr()) +} + +pub fn text_virtrange() -> VirtRange { + VirtRange(text_addr()..etext_addr()) +} + +pub fn rodata_virtrange() -> VirtRange { + VirtRange(rodata_addr()..erodata_addr()) +} + +pub fn data_virtrange() -> VirtRange { + VirtRange(data_addr()..edata_addr()) +} + +pub fn bss_virtrange() -> VirtRange { + VirtRange(bss_addr()..ebss_addr()) +} + +pub fn heap_virtrange() -> VirtRange { + VirtRange::with_len(heap_addr(), eheap_addr() - heap_addr()) +} + +pub fn early_pages_physrange() -> PhysRange { PhysRange::new( from_virt_to_physaddr(early_pagetables_addr()), from_virt_to_physaddr(eearly_pagetables_addr()), diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs index 2edce89..984304e 100644 --- a/aarch64/src/main.rs +++ b/aarch64/src/main.rs @@ -2,150 +2,44 @@ #![allow(internal_features)] #![cfg_attr(not(any(test)), no_std)] #![cfg_attr(not(test), no_main)] +#![feature(allocator_api)] #![feature(alloc_error_handler)] -#![feature(asm_const)] +#![feature(const_refs_to_static)] #![feature(core_intrinsics)] #![feature(strict_provenance)] +#![feature(sync_unsafe_cell)] #![forbid(unsafe_op_in_unsafe_fn)] +/// Keep this file as sparse as possible for two reasons: +/// 1. We keep the rust main weirdness isolated +/// 2. rust-analyzer gets confused about cfgs and thinks none of this code is +/// enabled and is therefore greyed out in VS Code, so let's move the bulk +/// of the code elsewhere. 
mod devcons; +mod init; mod io; mod kmem; mod mailbox; mod pagealloc; mod param; mod registers; +mod runtime; mod trap; mod uartmini; mod uartpl011; mod vm; +mod vmalloc; + +extern crate alloc; -use crate::kmem::from_virt_to_physaddr; -use crate::vm::kernel_root; -use core::ffi::c_void; -use core::ptr; -use port::fdt::DeviceTree; -use port::mem::PhysRange; -use port::println; -use vm::PageTable; +use crate::init::init; #[cfg(not(test))] core::arch::global_asm!(include_str!("l.S")); -static mut KPGTBL: PageTable = PageTable::empty(); - -unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_void) { - let start = start as *const _ as u64; - let end = end as *const _ as u64; - let size = end - start; - println!(" {name}{start:#x}..{end:#x} ({size:#x})"); -} - -fn print_binary_sections() { - extern "C" { - static boottext: *const c_void; - static eboottext: *const c_void; - static text: *const c_void; - static etext: *const c_void; - static rodata: *const c_void; - static erodata: *const c_void; - static data: *const c_void; - static edata: *const c_void; - static bss: *const c_void; - static end: *const c_void; - } - - println!("Binary sections:"); - unsafe { - print_memory_range("boottext:\t", &boottext, &eboottext); - print_memory_range("text:\t\t", &text, &etext); - print_memory_range("rodata:\t", &rodata, &erodata); - print_memory_range("data:\t\t", &data, &edata); - print_memory_range("bss:\t\t", &bss, &end); - print_memory_range("total:\t", &boottext, &end); - } -} - -fn print_physical_memory_info() { - println!("Physical memory map:"); - let arm_mem = mailbox::get_arm_memory(); - println!(" Memory:\t{arm_mem} ({:#x})", arm_mem.size()); - let vc_mem = mailbox::get_vc_memory(); - println!(" Video:\t{vc_mem} ({:#x})", vc_mem.size()); -} - -fn print_memory_info() { - println!("Memory usage:"); - let (used, total) = pagealloc::usage_bytes(); - println!(" Used:\t\t{used:#016x}"); - println!(" Total:\t{total:#016x}"); -} - -// https://github.com/raspberrypi/documentation/blob/develop/documentation/asciidoc/computers/raspberry-pi/revision-codes.adoc -fn print_pi_name(board_revision: u32) { - let name = match board_revision { - 0xa21041 => "Raspberry Pi 2B", - 0xa02082 => "Raspberry Pi 3B", - 0xb03115 => "Raspberry Pi 4B", - 0xa220a0 => "Raspberry Compute Module 3", - _ => "Unrecognised", - }; - println!(" Board Name:\t{name}"); -} - -fn print_board_info() { - println!("Board information:"); - let board_revision = mailbox::get_board_revision(); - print_pi_name(board_revision); - println!(" Board Rev:\t{board_revision:#010x}"); - let model = mailbox::get_board_model(); - println!(" Board Model:\t{model:#010x}"); - let serial = mailbox::get_board_serial(); - println!(" Serial Num:\t{serial:#010x}"); - let mailbox::MacAddress { a, b, c, d, e, f } = mailbox::get_board_macaddr(); - println!(" MAC Address:\t{a:02x}:{b:02x}:{c:02x}:{d:02x}:{e:02x}:{f:02x}"); - let fw_revision = mailbox::get_firmware_revision(); - println!(" Firmware Rev:\t{fw_revision:#010x}"); -} - /// dtb_va is the virtual address of the DTB structure. The physical address is /// assumed to be dtb_va-KZERO. 
#[no_mangle] pub extern "C" fn main9(dtb_va: usize) { - trap::init(); - - // Parse the DTB before we set up memory so we can correctly map it - let dt = unsafe { DeviceTree::from_usize(dtb_va).unwrap() }; - - // Set up uart so we can log as early as possible - mailbox::init(&dt); - devcons::init(&dt); - - println!(); - println!("r9 from the Internet"); - println!("DTB found at: {:#x}", dtb_va); - println!("midr_el1: {:?}", registers::MidrEl1::read()); - - print_binary_sections(); - print_physical_memory_info(); - print_board_info(); - - // Map address space accurately using rust VM code to manage page tables - unsafe { - let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size()); - vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, mailbox::get_arm_memory()); - vm::switch(&*ptr::addr_of!(KPGTBL)); - } - - // From this point we can use the global allocator - - print_memory_info(); - - kernel_root().print_recursive_tables(); - - println!("looping now"); - - #[allow(clippy::empty_loop)] - loop {} + init(dtb_va); } -mod runtime; diff --git a/aarch64/src/pagealloc.rs b/aarch64/src/pagealloc.rs index f3ca378..9d8367a 100644 --- a/aarch64/src/pagealloc.rs +++ b/aarch64/src/pagealloc.rs @@ -33,7 +33,7 @@ pub fn init_page_allocator() { let mut lock = PAGE_ALLOC.lock(&node); let page_alloc = &mut *lock; - let early_pages_range = kmem::early_pages_range(); + let early_pages_range = kmem::early_pages_physrange(); if let Err(err) = page_alloc.mark_free(&early_pages_range) { panic!("Couldn't mark early pages free: range: {} err: {:?}", early_pages_range, err); } diff --git a/aarch64/src/runtime.rs b/aarch64/src/runtime.rs index aa65a36..665b4ad 100644 --- a/aarch64/src/runtime.rs +++ b/aarch64/src/runtime.rs @@ -5,12 +5,16 @@ extern crate alloc; use crate::kmem::physaddr_as_virt; use crate::registers::rpi_mmio; use crate::uartmini::MiniUart; -use alloc::alloc::{GlobalAlloc, Layout}; +use crate::vmalloc; +use alloc::alloc::Layout; use core::fmt::Write; use core::panic::PanicInfo; use port::devcons::PanicConsole; use port::mem::VirtRange; +#[global_allocator] +static ALLOCATOR: vmalloc::Allocator = vmalloc::Allocator {}; + // TODO // - Add qemu integration test // - Use Console via println!() macro once available @@ -39,17 +43,3 @@ pub fn panic(info: &PanicInfo) -> ! { fn oom(_layout: Layout) -> ! 
{ panic!("oom"); } - -struct FakeAlloc; - -unsafe impl GlobalAlloc for FakeAlloc { - unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { - panic!("fake alloc"); - } - unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) { - panic!("fake dealloc"); - } -} - -#[global_allocator] -static FAKE_ALLOCATOR: FakeAlloc = FakeAlloc {}; diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index 02c8c52..6619d7f 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -2,8 +2,8 @@ use crate::{ kmem::{ - ebss_addr, erodata_addr, etext_addr, from_ptr_to_physaddr, from_virt_to_physaddr, - physaddr_as_ptr_mut, physaddr_as_virt, text_addr, + from_ptr_to_physaddr, kernel_bss_physrange, kernel_data_physrange, kernel_heap_physrange, + kernel_text_physrange, physaddr_as_ptr_mut, physaddr_as_virt, }, pagealloc, registers::rpi_mmio, @@ -485,24 +485,14 @@ pub unsafe fn init(kpage_table: &mut PageTable, dtb_range: PhysRange, available_ // TODO leave the first page unmapped to catch null pointer dereferences in unsafe code let custom_map = { - let text_range = - PhysRange(from_virt_to_physaddr(text_addr())..from_virt_to_physaddr(etext_addr())); - let data_range = PhysRange::with_len( - from_virt_to_physaddr(etext_addr()).addr(), - erodata_addr() - etext_addr(), - ); - let bss_range = PhysRange::with_len( - from_virt_to_physaddr(erodata_addr()).addr(), - ebss_addr() - erodata_addr(), - ); - let mmio_range = rpi_mmio().expect("mmio base detect failed"); let mut map = [ ("DTB", dtb_range, Entry::ro_kernel_data(), PageSize::Page4K), - ("Kernel Text", text_range, Entry::ro_kernel_text(), PageSize::Page2M), - ("Kernel Data", data_range, Entry::ro_kernel_data(), PageSize::Page2M), - ("Kernel BSS", bss_range, Entry::rw_kernel_data(), PageSize::Page2M), + ("Kernel Text", kernel_text_physrange(), Entry::ro_kernel_text(), PageSize::Page2M), + ("Kernel Data", kernel_data_physrange(), Entry::ro_kernel_data(), PageSize::Page2M), + ("Kernel BSS", kernel_bss_physrange(), Entry::rw_kernel_data(), PageSize::Page2M), + ("Kernel Heap", kernel_heap_physrange(), Entry::rw_kernel_data(), PageSize::Page2M), ("MMIO", mmio_range, Entry::ro_kernel_device(), PageSize::Page2M), ]; map.sort_by_key(|a| a.1.start()); diff --git a/aarch64/src/vmalloc.rs b/aarch64/src/vmalloc.rs new file mode 100644 index 0000000..438a06c --- /dev/null +++ b/aarch64/src/vmalloc.rs @@ -0,0 +1,48 @@ +use alloc::alloc::{GlobalAlloc, Layout}; +use core::mem::MaybeUninit; +use port::{ + mcslock::{Lock, LockNode}, + mem::VirtRange, + vmemalloc::VmemAlloc, +}; + +#[cfg(not(test))] +use port::println; + +// TODO replace with some sort of OnceLock? We need this to be dynamically created, +// but we're assuming VmAlloc is Sync. 
+static VMEM_ALLOC: Lock<Option<&'static mut VmemAlloc>> = Lock::new("vmemalloc", None);
+
+pub fn init(early_allocator: &'static dyn core::alloc::Allocator, heap_range: VirtRange) {
+    let node = LockNode::new();
+    let mut vmalloc = VMEM_ALLOC.lock(&node);
+    *vmalloc = Some({
+        static mut MAYBE_VMALLOC: MaybeUninit<VmemAlloc> = MaybeUninit::uninit();
+        unsafe {
+            MAYBE_VMALLOC.write({
+                let vmemalloc = VmemAlloc::new(early_allocator, heap_range);
+                vmemalloc.init();
+                vmemalloc
+            });
+            MAYBE_VMALLOC.assume_init_mut()
+        }
+    });
+}
+
+pub struct Allocator {}
+
+unsafe impl GlobalAlloc for Allocator {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        println!("vmalloc::alloc");
+
+        // Get the main allocator
+        let node = LockNode::new();
+        let mut lock = VMEM_ALLOC.lock(&node);
+        let vmemalloc = lock.as_deref_mut().unwrap();
+        vmemalloc.alloc(layout)
+    }
+
+    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
+        panic!("fake dealloc");
+    }
+}
diff --git a/lib/aarch64-unknown-none-elf.json b/lib/aarch64-unknown-none-elf.json
index ee81954..c12cb41 100644
--- a/lib/aarch64-unknown-none-elf.json
+++ b/lib/aarch64-unknown-none-elf.json
@@ -1,6 +1,6 @@
 {
   "arch": "aarch64",
-  "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
+  "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32",
   "disable-redzone": true,
   "executables": true,
   "features": "+strict-align,+neon,+fp-armv8",
diff --git a/port/src/bumpalloc.rs b/port/src/bumpalloc.rs
new file mode 100644
index 0000000..dd53783
--- /dev/null
+++ b/port/src/bumpalloc.rs
@@ -0,0 +1,158 @@
+use core::alloc::{AllocError, Allocator, Layout};
+use core::cell::UnsafeCell;
+use core::ptr::NonNull;
+use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+
+#[cfg(not(test))]
+use crate::println;
+
+/// Bump allocator to be used for earliest allocations in r9. These allocations
+/// can never be freed - attempting to do so will panic.
+#[repr(C, align(4096))]
+pub struct Bump<const SIZE_BYTES: usize, const MAX_SUPPORTED_ALIGN: usize> {
+    bytes: UnsafeCell<[u8; SIZE_BYTES]>,
+    next_offset: AtomicUsize,
+    wasted: AtomicUsize,
+}
+
+unsafe impl<const SIZE_BYTES: usize, const MAX_SUPPORTED_ALIGN: usize> Send
+    for Bump<SIZE_BYTES, MAX_SUPPORTED_ALIGN>
+{
+}
+unsafe impl<const SIZE_BYTES: usize, const MAX_SUPPORTED_ALIGN: usize> Sync
+    for Bump<SIZE_BYTES, MAX_SUPPORTED_ALIGN>
+{
+}
+
+impl<const SIZE_BYTES: usize, const MAX_SUPPORTED_ALIGN: usize>
+    Bump<SIZE_BYTES, MAX_SUPPORTED_ALIGN>
+{
+    pub const fn new(init_value: u8) -> Self {
+        Self {
+            bytes: UnsafeCell::new([init_value; SIZE_BYTES]),
+            next_offset: AtomicUsize::new(0),
+            wasted: AtomicUsize::new(0),
+        }
+    }
+
+    pub fn print_status(&self) {
+        let allocated = self.next_offset.load(Relaxed);
+        let remaining = SIZE_BYTES - allocated;
+        let wasted = self.wasted.load(Relaxed);
+        println!(
+            "Bump: allocated: {allocated} free: {remaining} total: {SIZE_BYTES} wasted: {wasted}"
+        );
+    }
+
+    /// Test helper to get the offset of the result in the buffer
+    #[cfg(test)]
+    fn result_offset(&self, result: Result<NonNull<[u8]>, AllocError>) -> Option<isize> {
+        unsafe {
+            result
+                .ok()
+                .map(|bytes| bytes.byte_offset_from(NonNull::new_unchecked(self.bytes.get())))
+        }
+    }
+}
+
+unsafe impl<const SIZE_BYTES: usize, const MAX_SUPPORTED_ALIGN: usize> Allocator
+    for Bump<SIZE_BYTES, MAX_SUPPORTED_ALIGN>
+{
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        let size = layout.size();
+        let align = layout.align();
+
+        if align > MAX_SUPPORTED_ALIGN {
+            return Err(AllocError {});
+        }
+
+        let mut wasted = 0;
+        let mut alloc_offset = 0;
+        if self
+            .next_offset
+            .fetch_update(Relaxed, Relaxed, |last_offset| {
+                let align_mask = !(align - 1);
+                alloc_offset = if last_offset & !align_mask != 0 {
+                    (last_offset + align) & align_mask
+                } else {
+                    last_offset
+                };
+                wasted = alloc_offset - last_offset;
+
+                let new_offset = alloc_offset + size;
+                if new_offset > SIZE_BYTES {
+                    None
+                } else {
+                    Some(new_offset)
+                }
+            })
+            .is_err()
+        {
+            Err(AllocError {})
+        } else {
+            self.wasted.fetch_add(wasted, Relaxed);
+            Ok(unsafe { NonNull::new_unchecked(self.bytes.get().byte_add(alloc_offset)) })
+        }
+    }
+
+    unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
+        // panic!("Can't deallocate from Bump allocator (ptr: {:p}, layout: {:?})", ptr, layout)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::mem::PAGE_SIZE_4K;
+
+    use super::*;
+
+    #[test]
+    fn bump_new() {
+        let bump = Bump::<PAGE_SIZE_4K, PAGE_SIZE_4K>::new(0);
+        let result = unsafe { bump.allocate(Layout::from_size_align_unchecked(4096, 4096)) };
+        assert!(result.is_ok());
+        assert_eq!(bump.result_offset(result), Some(0));
+        assert_eq!(bump.wasted.load(Relaxed), 0);
+        assert_eq!(bump.next_offset.load(Relaxed), 4096);
+
+        // Next should fail - out of space
+        let result = unsafe { bump.allocate(Layout::from_size_align_unchecked(1, 1)) };
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn bump_alignment() {
+        let bump = Bump::<{ 3 * PAGE_SIZE_4K }, PAGE_SIZE_4K>::new(0);
+
+        // Small allocation
+        let mut expected_waste = 0;
+        let result = unsafe { bump.allocate(Layout::from_size_align_unchecked(16, 1)) };
+        assert!(result.is_ok());
+        assert_eq!(bump.result_offset(result), Some(0));
+        assert_eq!(bump.wasted.load(Relaxed), expected_waste);
+        assert_eq!(bump.next_offset.load(Relaxed), 16);
+
+        // Align next allocation to 4096, wasting space
+        expected_waste += 4096 - 16;
+        let result = unsafe { bump.allocate(Layout::from_size_align_unchecked(16, 4096)) };
+        assert!(result.is_ok());
+        assert_eq!(bump.result_offset(result), Some(4096));
+        assert_eq!(bump.wasted.load(Relaxed), expected_waste);
+        assert_eq!(bump.next_offset.load(Relaxed), 4096 + 16);
+
+        // Align next allocation to 4096, wasting space
+        expected_waste += 4096 - 16;
+        let result = unsafe { bump.allocate(Layout::from_size_align_unchecked(4096, 4096)) };
+        assert!(result.is_ok());
+        assert_eq!(bump.result_offset(result), Some(2 * 4096));
+        assert_eq!(bump.wasted.load(Relaxed), expected_waste);
+        assert_eq!(bump.next_offset.load(Relaxed), 3 * 4096);
+    }
+
+    #[test]
+    fn align_too_high() {
+        let bump = Bump::<PAGE_SIZE_4K, PAGE_SIZE_4K>::new(0);
+        let result = unsafe { bump.allocate(Layout::from_size_align_unchecked(4096, 8192)) };
+        assert!(result.is_err());
+    }
+}
diff --git a/port/src/fdt.rs b/port/src/fdt.rs
index b4a6251..f05900b 100644
--- a/port/src/fdt.rs
+++ b/port/src/fdt.rs
@@ -46,6 +46,7 @@ fn align4(n: usize) -> usize {
 }
 
 /// DeviceTree is the class entrypoint to the Devicetree operations.
+///
 /// This code focuses only on parsing a Flattened Devicetree without using the heap.
 /// The Devicetree specification can be found here:
 /// https://www.devicetree.org/specifications/
diff --git a/port/src/lib.rs b/port/src/lib.rs
index 8120f05..fb0704f 100644
--- a/port/src/lib.rs
+++ b/port/src/lib.rs
@@ -1,12 +1,19 @@
 #![allow(clippy::upper_case_acronyms)]
 #![cfg_attr(not(any(test)), no_std)]
+#![feature(allocator_api)]
 #![feature(maybe_uninit_slice)]
+#![feature(slice_ptr_get)]
 #![feature(step_trait)]
 #![forbid(unsafe_op_in_unsafe_fn)]
 
 pub mod bitmapalloc;
+pub mod bumpalloc;
 pub mod dat;
 pub mod devcons;
 pub mod fdt;
 pub mod mcslock;
 pub mod mem;
+pub mod vmem;
+pub mod vmemalloc;
+
+extern crate alloc;
diff --git a/port/src/mem.rs b/port/src/mem.rs
index 40154ee..3232d4b 100644
--- a/port/src/mem.rs
+++ b/port/src/mem.rs
@@ -12,7 +12,13 @@ pub const PAGE_SIZE_1G: usize = 1 << 30;
 pub struct VirtRange(pub Range<usize>);
 
 impl VirtRange {
-    pub fn with_len(start: usize, len: usize) -> Self {
+    pub fn from_any<T>(a: T) -> Self {
+        let addr = &a as *const _ as usize;
+        let size = core::mem::size_of_val(&a);
+        Self(addr..addr + size)
+    }
+
+    pub const fn with_len(start: usize, len: usize) -> Self {
         Self(start..start + len)
     }
 
@@ -32,6 +38,10 @@ impl VirtRange {
     pub fn end(&self) -> usize {
         self.0.end
     }
+
+    pub fn size(&self) -> usize {
+        self.0.end - self.0.start
+    }
 }
 
 impl From<&RegBlock> for VirtRange {
@@ -42,6 +52,13 @@ impl From<&RegBlock> for VirtRange {
     }
 }
 
+impl fmt::Debug for VirtRange {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "VirtRange({:#016x}..{:#016x})", self.start(), self.end())?;
+        Ok(())
+    }
+}
+
 #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)]
 #[repr(transparent)]
 pub struct PhysAddr(pub u64);
diff --git a/port/src/vmem.rs b/port/src/vmem.rs
new file mode 100644
index 0000000..91c5c90
--- /dev/null
+++ b/port/src/vmem.rs
@@ -0,0 +1,931 @@
+use crate::mem::VirtRange;
+use crate::{mcslock::Lock, mem::PAGE_SIZE_4K};
+use alloc::sync::Arc;
+use core::{alloc::Layout, ops::Range, ptr::null_mut, slice};
+
+// TODO reserve recursive area in vmem(?)
+// TODO Add hashtable for allocated tags - makes it faster when freeing, given only an address.
+// TODO Add support for quantum caches once we have slab allocators implemented.
+// TODO Add power-of-two freelists for freed allocations.
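
// A minimal usage sketch of the Arena API defined below (illustrative only;
// `tags` is assumed to be a pre-allocated slice of TagItem, e.g. carved out
// of a static page the way the tests at the end of this file do):
//
//     let mut arena = Arena::new_with_tags(
//         "example",
//         Some(Boundary::new_unchecked(4096, 4096 * 20)),
//         4096, // quantum: allocation sizes are rounded up to this granularity
//         tags,
//     );
//     let a = arena.alloc(2 * 4096); // carves an Allocated segment out of the span
//     arena.free(a);                 // freed segments merge with free neighbours
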
+
+#[derive(Debug, PartialEq)]
+pub enum BoundaryError {
+    ZeroSize,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum AllocError {
+    NoSpace,
+    AllocationNotFound,
+}
+
+#[cfg(test)]
+type BoundaryResult<T> = core::result::Result<T, BoundaryError>;
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct Boundary {
+    start: usize,
+    size: usize,
+}
+
+impl Boundary {
+    #[cfg(test)]
+    fn new(start: usize, size: usize) -> BoundaryResult<Self> {
+        if size == 0 {
+            Err(BoundaryError::ZeroSize)
+        } else {
+            Ok(Self { start, size })
+        }
+    }
+
+    fn new_unchecked(start: usize, size: usize) -> Self {
+        Self { start, size }
+    }
+
+    #[allow(dead_code)]
+    fn overlaps(&self, other: &Boundary) -> bool {
+        let boundary_end = self.start + self.size;
+        let tag_end = other.start + other.size;
+        (self.start <= other.start && boundary_end > other.start)
+            || (self.start < tag_end && boundary_end >= tag_end)
+            || (self.start <= other.start && boundary_end >= tag_end)
+    }
+
+    #[allow(dead_code)]
+    fn end(&self) -> usize {
+        self.start + self.size
+    }
+}
+
+impl From<VirtRange> for Boundary {
+    fn from(r: VirtRange) -> Self {
+        Boundary::new_unchecked(r.start(), r.size())
+    }
+}
+
+impl From<Range<usize>> for Boundary {
+    fn from(r: Range<usize>) -> Self {
+        Boundary::new_unchecked(r.start, r.end - r.start)
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum TagType {
+    Allocated,
+    Free,
+    Span,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+struct Tag {
+    tag_type: TagType,
+    boundary: Boundary,
+}
+
+impl Tag {
+    fn new(tag_type: TagType, boundary: Boundary) -> Self {
+        Self { tag_type, boundary }
+    }
+
+    #[cfg(test)]
+    fn new_allocated(boundary: Boundary) -> Self {
+        Tag::new(TagType::Allocated, boundary)
+    }
+
+    fn new_free(boundary: Boundary) -> Self {
+        Tag::new(TagType::Free, boundary)
+    }
+
+    fn new_span(boundary: Boundary) -> Self {
+        Tag::new(TagType::Span, boundary)
+    }
+}
+
+// impl fmt::Debug for Tag {
+//     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+//         write!(
+//             f,
+//             "Tag({:?} {}..{} (size: {}))",
+//             self.tag_type,
+//             self.boundary.start,
+//             self.boundary.start + self.boundary.size,
+//             self.boundary.size
+//         )?;
+//         Ok(())
+//     }
+// }
+
+#[derive(Debug)]
+struct TagItem {
+    tag: Tag,
+    next: *mut TagItem,
+    prev: *mut TagItem,
+}
+
+impl TagItem {
+    #[cfg(test)]
+    fn new_allocated(boundary: Boundary) -> Self {
+        Self { tag: Tag::new_allocated(boundary), next: null_mut(), prev: null_mut() }
+    }
+}
+
+/// Pool of boundary tags. Vmem uses external boundary tags. We allocate a page
+/// of tags at a time, making them available via this pool. This allows us to
+/// set up the pool initially with a static page, before we have any kind of
+/// allocator. The pool can later be populated dynamically.
+struct TagPool { + tags: *mut TagItem, +} + +impl TagPool { + fn new() -> Self { + Self { tags: null_mut() } + } + + fn add(&mut self, tag: &mut TagItem) { + if self.tags.is_null() { + self.tags = tag; + } else { + tag.next = self.tags; + unsafe { (*tag.next).prev = tag }; + self.tags = tag; + } + } + + fn take(&mut self, tag: Tag) -> *mut TagItem { + if let Some(tag_item) = unsafe { self.tags.as_mut() } { + self.tags = tag_item.next; + if let Some(next_tag) = unsafe { self.tags.as_mut() } { + next_tag.prev = null_mut(); + } + tag_item.next = null_mut(); + tag_item.prev = null_mut(); + tag_item.tag = tag; + tag_item as *mut TagItem + } else { + null_mut() + } + } + + #[allow(dead_code)] + fn len(&self) -> usize { + let mut n = 0; + let mut free_tag = self.tags; + while let Some(tag) = unsafe { free_tag.as_ref() } { + n += 1; + free_tag = tag.next; + } + n + } +} + +/// Ordered list of tags (by Tag::start) +/// This is a simple linked list that assumes no overlaps. +struct TagList { + tags: *mut TagItem, +} + +impl TagList { + fn new() -> Self { + Self { tags: null_mut() } + } + + fn push(&mut self, new_tag: &mut TagItem) { + if self.tags.is_null() { + self.tags = new_tag; + } else { + let mut curr_tag_item = self.tags; + while let Some(item) = unsafe { curr_tag_item.as_mut() } { + if item.tag.boundary.start > new_tag.tag.boundary.start { + // Insert before tag + if let Some(prev_tag) = unsafe { item.prev.as_mut() } { + prev_tag.next = new_tag; + } else { + // Inserting as first tag + self.tags = new_tag; + } + new_tag.next = item; + item.prev = new_tag; + return; + } + if item.next.is_null() { + // Inserting as last tag + new_tag.prev = item; + item.next = new_tag; + return; + } + curr_tag_item = item.next; + } + } + } + + /// Remove tag_item from the list. Placing tag_item onto the free list is + /// the callers responsibility. 
+ fn unlink(tag_item: &mut TagItem) { + if let Some(prev) = unsafe { tag_item.prev.as_mut() } { + prev.next = tag_item.next; + } + if let Some(next) = unsafe { tag_item.next.as_mut() } { + next.prev = tag_item.prev; + } + tag_item.next = null_mut(); + tag_item.prev = null_mut(); + } + + fn len(&self) -> usize { + let mut n = 0; + let mut curr_tag = self.tags; + while let Some(tag) = unsafe { curr_tag.as_ref() } { + n += 1; + curr_tag = tag.next; + } + n + } + + fn tags_iter(&self) -> impl Iterator + '_ { + let mut curr_tag_item = self.tags; + core::iter::from_fn(move || { + if let Some(item) = unsafe { curr_tag_item.as_ref() } { + curr_tag_item = item.next; + Some(item.tag) + } else { + None + } + }) + } + + // fn add_tag(&mut self, boundary: Boundary, free_tags: &mut TagStack) -> BoundaryResult<()> { + // // Code to pop a tag + // // let tag = unsafe { + // // arena.free_tags.pop().as_mut().expect("Arena::new_with_tags no free tags") + // // }; + + // if boundary.size == 0 { + // return Err(BoundaryError::ZeroSize); + // } + + // let bstart = boundary.start; + // let bend = boundary.start + boundary.size; + + // let mut curr_tag = self.tags; + // while let Some(tag) = unsafe { curr_tag.as_ref() } { + // let tag_start = tag.boundary.start; + // let tag_end = tag_start + tag.boundary.size; + // if (bstart <= tag_start && bend > tag_start) + // || (bstart < tag_end && bend >= tag_end) + // || (bstart <= tag_start && bend >= tag_end) + // {} + // curr_tag = tag.next; + // } + + // Ok(()) + // } +} + +// TODO this needs to be Sync, so actually make it sync +pub struct Arena { + name: &'static str, + quantum: usize, + + tag_pool: TagPool, // Pool of available tags + segment_list: TagList, // List of all segments in address order + + //parent: Option<&Arena>, // Parent arena to import from +} + +// unsafe impl Send for Arena {} +// unsafe impl Sync for Arena {} + +pub trait Allocator { + fn alloc(&mut self, size: usize) -> *mut u8; + fn free(&mut self, addr: *mut u8); +} + +impl Arena { + pub fn new( + name: &'static str, + initial_span: Option, + quantum: usize, + _parent: Option, &dyn core::alloc::Allocator>>, + ) -> Self { + // println!("Arena::new name:{} initial_span:{:?} quantum:{:x}", name, initial_span, quantum); + + let mut arena = + Self { name, quantum, segment_list: TagList::new(), tag_pool: TagPool::new() }; + //arena.add_tags_to_pool(tags); + + if let Some(span) = initial_span { + arena.add_initial_span(span); + } + + arena + } + + /// Only to be used for creation of initial heap + /// Create a new arena, assuming there is no dynamic allocation available, + /// and all free tags come from the free_tags provided. + pub fn new_with_allocator( + name: &'static str, + initial_span: Option, + quantum: usize, + allocator: &'static dyn core::alloc::Allocator, + ) -> Self { + let layout = unsafe { Layout::from_size_align_unchecked(PAGE_SIZE_4K, PAGE_SIZE_4K) }; + let tags_buffer = + allocator.allocate_zeroed(layout).expect("unable to allocate initial vmem tags"); + let tags = unsafe { + slice::from_raw_parts_mut( + tags_buffer.as_mut_ptr() as *mut TagItem, + layout.size() / size_of::(), + ) + }; + + Self::new_with_tags(name, initial_span, quantum, tags) + } + + /// Only to be used for creation of initial heap + /// Create a new arena, assuming there is no dynamic allocation available, + /// and all free tags come from the free_tags provided. 
+ fn new_with_tags( + name: &'static str, + initial_span: Option, + quantum: usize, + tags: &mut [TagItem], + ) -> Self { + // println!( + // "Arena::new_with_tags name:{} initial_span:{:?} quantum:{:x}", + // name, initial_span, quantum + // ); + + let mut arena = + Self { name, quantum, segment_list: TagList::new(), tag_pool: TagPool::new() }; + arena.add_tags_to_pool(tags); + + if let Some(span) = initial_span { + arena.add_initial_span(span); + } + + arena + } + + fn add_initial_span(&mut self, span: Boundary) { + assert_eq!(span.start % self.quantum, 0); + assert_eq!(span.size % self.quantum, 0); + assert!(span.start.checked_add(span.size).is_some()); + self.add_free_span(span); + } + + pub fn name(&self) -> &'static str { + self.name + } + + fn add_free_span(&mut self, boundary: Boundary) { + self.segment_list.push(unsafe { + self.tag_pool.take(Tag::new_span(boundary)).as_mut().expect("no free tags") + }); + self.segment_list.push(unsafe { + self.tag_pool.take(Tag::new_free(boundary)).as_mut().expect("no free tags") + }); + } + + fn add_tags_to_pool(&mut self, tags: &mut [TagItem]) { + for tag in tags { + tag.next = null_mut(); + tag.prev = null_mut(); + self.tag_pool.add(tag); + } + } + + /// Allocate a segment, returned as a boundary + fn alloc_segment(&mut self, size: usize) -> Result { + // println!("alloc_segment size: {}", size); + + // Round size up to a multiple of quantum + let size = { + let rem = size % self.quantum; + if rem == 0 { + size + } else { + size + (self.quantum - rem) + } + }; + + // Find the first free tag that's large enough + let mut curr_item = self.segment_list.tags; + while let Some(item) = unsafe { curr_item.as_mut() } { + if item.tag.tag_type == TagType::Free && item.tag.boundary.size >= size { + // Mark this tag as allocated, and if there's any left over space, + // create and insert a new tag + item.tag.tag_type = TagType::Allocated; + if item.tag.boundary.size > size { + // Work out the size of the new free item, and change the size + // of the current, now allocated, item + let remainder = item.tag.boundary.size - size; + item.tag.boundary.size = size; + + let new_tag = Tag::new_free(Boundary::new_unchecked( + item.tag.boundary.start + size, + remainder, + )); + let new_item = + unsafe { self.tag_pool.take(new_tag).as_mut().expect("no free tags") }; + + // Insert new_item after item + new_item.next = item.next; + new_item.prev = item; + item.next = new_item; + if !new_item.next.is_null() { + unsafe { (*new_item.next).prev = new_item }; + } + } + return Ok(item.tag.boundary); + } + curr_item = item.next; + } + Err(AllocError::NoSpace) + } + + // Free addr. We don't need to know size because we don't merge allocations. 
+ // (We only merge freed segments) + // TODO Error on precondition fail + fn free_segment(&mut self, addr: usize) -> Result<(), AllocError> { + // Need to manually scan the used tags + let mut curr_item = self.segment_list.tags; + while let Some(item) = unsafe { curr_item.as_mut() } { + if item.tag.boundary.start == addr && item.tag.tag_type == TagType::Allocated { + break; + } + curr_item = item.next; + } + + if curr_item.is_null() { + return Err(AllocError::AllocationNotFound); + } + + let curr_tag: &mut TagItem = unsafe { curr_item.as_mut() }.unwrap(); + + // Found tag to free + let prev_type = unsafe { curr_tag.prev.as_ref() }.map(|t| t.tag.tag_type); + let next_type = unsafe { curr_tag.next.as_ref() }.map(|t| t.tag.tag_type); + + match (prev_type, next_type) { + (Some(TagType::Allocated), Some(TagType::Allocated)) + | (Some(TagType::Span), Some(TagType::Span)) + | (Some(TagType::Span), Some(TagType::Allocated)) + | (Some(TagType::Allocated), Some(TagType::Span)) + | (Some(TagType::Span), None) + | (Some(TagType::Allocated), None) => { + // No frees on either side + // -> Change curr_tag to free + curr_tag.tag.tag_type = TagType::Free; + } + (Some(TagType::Span), Some(TagType::Free)) + | (Some(TagType::Allocated), Some(TagType::Free)) => { + // Prev non-free, next free + // Change next tag start to merge with curr_tag, release curr_tag + let next = unsafe { curr_tag.next.as_mut() }.unwrap(); + next.tag.boundary.start = curr_tag.tag.boundary.start; + next.tag.boundary.size += curr_tag.tag.boundary.size; + TagList::unlink(curr_tag); + self.tag_pool.add(curr_tag); + } + (Some(TagType::Free), None) + | (Some(TagType::Free), Some(TagType::Span)) + | (Some(TagType::Free), Some(TagType::Allocated)) => { + // Prev free, next non-free + // Change prev tag size to merge with curr_tag, release curr_tag + let prev = unsafe { curr_tag.prev.as_mut() }.unwrap(); + prev.tag.boundary.size += curr_tag.tag.boundary.size; + TagList::unlink(curr_tag); + self.tag_pool.add(curr_tag); + } + (Some(TagType::Free), Some(TagType::Free)) => { + // Prev and next both free + // Change prev size to merge with both curr_tag and next, release curr_tag + let prev = unsafe { curr_tag.prev.as_mut() }.unwrap(); + let next = unsafe { curr_tag.next.as_mut() }.unwrap(); + prev.tag.boundary.size += curr_tag.tag.boundary.size + next.tag.boundary.size; + TagList::unlink(curr_tag); + TagList::unlink(next); + self.tag_pool.add(curr_tag); + self.tag_pool.add(next); + } + (None, None) + | (None, Some(TagType::Span)) + | (None, Some(TagType::Allocated)) + | (None, Some(TagType::Free)) => { + self.assert_tags_are_consistent(); + panic!("Unexpected tags when freeing"); + } + } + + Ok(()) + } + + fn tags_iter(&self) -> impl Iterator + '_ { + self.segment_list.tags_iter() + } + + /// Checks that all invariants are correct. 
+ fn assert_tags_are_consistent(&self) { + // There must be at least 2 tags + debug_assert!(self.segment_list.len() >= 2); + + // Tags must be in order, without gaps + let mut last_tag: Option = None; + let mut last_span: Option = None; + let mut last_span_total = 0; + for (i, tag) in self.tags_iter().enumerate() { + debug_assert!(tag.boundary.size > 0); + + if i == 0 { + debug_assert_eq!(tag.tag_type, TagType::Span); + debug_assert!(last_tag.is_none()); + debug_assert!(last_span.is_none()); + debug_assert_eq!(last_span_total, 0); + } else { + debug_assert!(last_tag.is_some()); + debug_assert!(last_span.is_some()); + + // Tags should be ordered + let last_tag = last_tag.unwrap(); + let out_of_order = (last_tag.tag_type == TagType::Span + && tag.boundary.start >= last_tag.boundary.start) + || (last_tag.tag_type != TagType::Span + && tag.boundary.start > last_tag.boundary.start); + debug_assert!( + out_of_order, + "Tags out of order: tag{}: {:?}, tag{}: {:?}", + i - 1, + last_tag, + i, + tag, + ); + } + + match tag.tag_type { + TagType::Span => { + // Spans must not overlap + if last_span.is_some() { + debug_assert_eq!(last_span_total, last_span.unwrap().boundary.size); + } + last_span = Some(tag); + } + TagType::Allocated | TagType::Free => { + last_span_total += tag.boundary.size; + // First tag after span should have same start as span + if last_tag.is_some_and(|t| t.tag_type == TagType::Span) { + debug_assert_eq!(tag.boundary.start, last_tag.unwrap().boundary.start); + } + } + } + last_tag = Some(tag); + } + } +} + +impl Allocator for Arena { + fn alloc(&mut self, size: usize) -> *mut u8 { + let boundary = self.alloc_segment(size); + if let Ok(boundary) = boundary { + boundary.start as *mut u8 + } else { + null_mut() + } + } + + fn free(&mut self, addr: *mut u8) { + let _ = self.free_segment(addr as usize); + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn ensure_sizes() { + assert_eq!(size_of::(), 24); + assert_eq!(size_of::(), 40); + } + + #[test] + fn test_boundary() { + assert!(Boundary::new(10, 1).is_ok()); + assert_eq!(Boundary::new(10, 0), BoundaryResult::Err(BoundaryError::ZeroSize)); + + // Overlap left + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(2, 5).unwrap())); + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(0, 10).unwrap())); + assert!(Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(0, 11).unwrap())); + + // Overlap right + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(25, 5).unwrap())); + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(20, 10).unwrap())); + assert!(Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(19, 1).unwrap())); + + // Exact match + assert!(Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(10, 10).unwrap())); + + // Inside + assert!(Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(15, 1).unwrap())); + assert!(Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(10, 1).unwrap())); + assert!(Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(19, 1).unwrap())); + + // Outside left + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(0, 1).unwrap())); + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(0, 10).unwrap())); + + // Outside right + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(20, 1).unwrap())); + assert!(!Boundary::new(10, 10).unwrap().overlaps(&Boundary::new(25, 1).unwrap())); + } + + // Page4K would normally be in the arch crate, but we define something + // similar here for 
testing. + #[repr(C, align(4096))] + #[derive(Clone, Copy)] + pub struct Page4K([u8; 4096]); + + #[test] + fn test_tagstack() { + let mut page = Page4K([0; 4096]); + const NUM_TAGS: usize = size_of::() / size_of::(); + let tags = unsafe { &mut *(&mut page as *mut Page4K as *mut [TagItem; NUM_TAGS]) }; + let mut tag_stack = TagPool::new(); + + assert_eq!(tag_stack.len(), 0); + for tag in tags { + tag_stack.add(tag); + } + assert_eq!(tag_stack.len(), NUM_TAGS); + } + + #[test] + fn test_taglist() { + let mut list = TagList::new(); + assert_eq!(list.len(), 0); + assert_eq!(list.tags_iter().collect::>(), []); + + let mut tag1 = TagItem::new_allocated(Boundary::new(100, 100).unwrap()); + list.push(&mut tag1); + assert_eq!(list.len(), 1); + assert_eq!( + list.tags_iter().collect::>(), + [Tag::new_allocated(Boundary::new(100, 100).unwrap())] + ); + + // Insert new at end + let mut tag2 = TagItem::new_allocated(Boundary::new(500, 100).unwrap()); + list.push(&mut tag2); + assert_eq!(list.len(), 2); + assert_eq!( + list.tags_iter().collect::>(), + [ + Tag::new_allocated(Boundary::new(100, 100).unwrap()), + Tag::new_allocated(Boundary::new(500, 100).unwrap()) + ] + ); + + // Insert new at start + let mut tag3 = TagItem::new_allocated(Boundary::new(0, 100).unwrap()); + list.push(&mut tag3); + assert_eq!(list.len(), 3); + assert_eq!( + list.tags_iter().collect::>(), + [ + Tag::new_allocated(Boundary::new(0, 100).unwrap()), + Tag::new_allocated(Boundary::new(100, 100).unwrap()), + Tag::new_allocated(Boundary::new(500, 100).unwrap()) + ] + ); + + // Insert new in middle + let mut tag4 = TagItem::new_allocated(Boundary::new(200, 100).unwrap()); + list.push(&mut tag4); + assert_eq!(list.len(), 4); + assert_eq!( + list.tags_iter().collect::>(), + [ + Tag::new_allocated(Boundary::new(0, 100).unwrap()), + Tag::new_allocated(Boundary::new(100, 100).unwrap()), + Tag::new_allocated(Boundary::new(200, 100).unwrap()), + Tag::new_allocated(Boundary::new(500, 100).unwrap()) + ] + ); + } + + fn create_arena_with_static_tags( + name: &'static str, + initial_span: Option, + quantum: usize, + _parent_arena: Option<&mut Arena>, + ) -> Arena { + let mut page = Page4K([0; 4096]); + const NUM_TAGS: usize = size_of::() / size_of::(); + let tags = unsafe { &mut *(&mut page as *mut Page4K as *mut [TagItem; NUM_TAGS]) }; + Arena::new_with_tags(name, initial_span, quantum, tags) + } + + fn assert_tags_eq(arena: &Arena, expected: &[Tag]) { + arena.assert_tags_are_consistent(); + let actual_tags = arena.tags_iter().collect::>(); + assert_eq!(actual_tags, expected, "arena tag mismatch"); + } + + #[test] + fn test_arena_create() { + let arena = create_arena_with_static_tags( + "test", + Some(Boundary::new_unchecked(4096, 4096 * 20)), + 4096, + None, + ); + assert_eq!(arena.tag_pool.len(), 100); + + assert_tags_eq( + &arena, + &[ + Tag::new_span(Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new_free(Boundary::new(4096, 4096 * 20).unwrap()), + ], + ); + } + + #[test] + fn test_arena_alloc() { + let mut arena = create_arena_with_static_tags( + "test", + Some(Boundary::new_unchecked(4096, 4096 * 20)), + 4096, + None, + ); + + arena.alloc(4096 * 2); + + assert_tags_eq( + &arena, + &[ + Tag::new_span(Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new_allocated(Boundary::new(4096, 4096 * 2).unwrap()), + Tag::new_free(Boundary::new(4096 * 3, 4096 * 18).unwrap()), + ], + ); + } + + #[test] + fn test_arena_alloc_rounds_if_wrong_granule() { + let mut arena = create_arena_with_static_tags( + "test", + Some(Boundary::new_unchecked(4096, 
4096 * 20)), + 4096, + None, + ); + let a = arena.alloc_segment(1024); + assert_eq!(a.unwrap().size, 4096); + } + + #[test] + fn test_arena_free() { + let mut arena = create_arena_with_static_tags( + "test", + Some(Boundary::new_unchecked(4096, 4096 * 20)), + 4096, + None, + ); + assert_eq!(arena.tag_pool.len(), 100); + + // We need to test each case where we're freeing by scanning the tags linearly. + // To do this we run through each case (comments from the `free` function) + + // Prev and next both non-free + let a1 = arena.alloc(4096); + let a2 = arena.alloc(4096); + assert_eq!(arena.tag_pool.len(), 98); + assert_tags_eq( + &arena, + &[ + Tag::new(TagType::Span, Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new(TagType::Allocated, Boundary::new(4096, 4096).unwrap()), + Tag::new(TagType::Allocated, Boundary::new(4096 * 2, 4096).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096 * 3, 4096 * 18).unwrap()), + ], + ); + arena.free(a1); + assert_eq!(arena.tag_pool.len(), 98); + assert_tags_eq( + &arena, + &[ + Tag::new(TagType::Span, Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096, 4096).unwrap()), + Tag::new(TagType::Allocated, Boundary::new(4096 * 2, 4096).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096 * 3, 4096 * 18).unwrap()), + ], + ); + + // Prev and next both free + arena.free(a2); + assert_eq!(arena.tag_pool.len(), 100); + assert_tags_eq( + &arena, + &[ + Tag::new(TagType::Span, Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096, 4096 * 20).unwrap()), + ], + ); + + // Prev free, next non-free + let a1 = arena.alloc(4096); + let a2 = arena.alloc(4096); + let a3 = arena.alloc(4096); + arena.free(a1); + assert_eq!(arena.tag_pool.len(), 97); + assert_tags_eq( + &arena, + &[ + Tag::new(TagType::Span, Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096, 4096).unwrap()), + Tag::new(TagType::Allocated, Boundary::new(4096 * 2, 4096).unwrap()), + Tag::new(TagType::Allocated, Boundary::new(4096 * 3, 4096).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096 * 4, 4096 * 17).unwrap()), + ], + ); + arena.free(a2); + assert_eq!(arena.tag_pool.len(), 98); + assert_tags_eq( + &arena, + &[ + Tag::new(TagType::Span, Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096, 4096 * 2).unwrap()), + Tag::new(TagType::Allocated, Boundary::new(4096 * 3, 4096).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096 * 4, 4096 * 17).unwrap()), + ], + ); + + // Prev non-free, next free + arena.free(a3); + let a1 = arena.alloc(4096); + assert_eq!(arena.tag_pool.len(), 99); + assert_tags_eq( + &arena, + &[ + Tag::new(TagType::Span, Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new(TagType::Allocated, Boundary::new(4096, 4096).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096 * 2, 4096 * 19).unwrap()), + ], + ); + arena.free(a1); + assert_eq!(arena.tag_pool.len(), 100); + assert_tags_eq( + &arena, + &[ + Tag::new(TagType::Span, Boundary::new(4096, 4096 * 20).unwrap()), + Tag::new(TagType::Free, Boundary::new(4096, 4096 * 20).unwrap()), + ], + ); + } + + // #[test] + // fn test_arena_nesting() { + // // Create a page of tags we can share amongst the first arenas + // let mut page = Page4K([0; 4096]); + // const NUM_TAGS: usize = size_of::() / size_of::(); + // let all_tags = unsafe { &mut *(&mut page as *mut Page4K as *mut [TagItem; NUM_TAGS]) }; + + // const NUM_ARENAS: usize = 4; + // const NUM_TAGS_PER_ARENA: usize = NUM_TAGS / NUM_ARENAS; + // 
let (arena1_tags, all_tags) = all_tags.split_at_mut(NUM_TAGS_PER_ARENA); + // let (arena2_tags, all_tags) = all_tags.split_at_mut(NUM_TAGS_PER_ARENA); + // let (arena3a_tags, all_tags) = all_tags.split_at_mut(NUM_TAGS_PER_ARENA); + // let (arena3b_tags, _) = all_tags.split_at_mut(NUM_TAGS_PER_ARENA); + + // let mut arena1 = Arena::new_with_tags( + // "arena1", + // Some(Boundary::new_unchecked(4096, 4096 * 20)), + // 4096, + // arena1_tags, + // ); + + // // Import all + // let mut arena2 = Arena::new_with_tags("arena2", None, 4096, arena2_tags); + + // // Import first half + // let mut arena3a = Arena::new_with_tags( + // "arena3a", + // Some(Boundary::from(4096..4096 * 10)), + // 4096, + // arena3a_tags, + // ); + + // // Import second half + // let mut arena3b = Arena::new_with_tags( + // "arena3b", + // Some(Boundary::from(4096 * 10..4096 * 21)), + // 4096, + // arena3b_tags, + // ); + + // // Let's do some allocations + // } +} diff --git a/port/src/vmemalloc.rs b/port/src/vmemalloc.rs new file mode 100644 index 0000000..96b1125 --- /dev/null +++ b/port/src/vmemalloc.rs @@ -0,0 +1,131 @@ +use crate::{ + mcslock::{Lock, LockNode}, + mem::{VirtRange, PAGE_SIZE_4K}, + vmem::{Allocator, Arena, Boundary}, +}; +use alloc::sync::Arc; +use core::alloc::{AllocError, Layout}; +use core::ptr::NonNull; + +/// VmAlloc is an attempt to write a Bonwick vmem-style allocator. It currently +/// expects another allocator to exist beforehand. +/// TODO Use the allocator api trait. +pub struct VmemAlloc { + heap_arena: Arc, &'static dyn core::alloc::Allocator>, + va_arena: Option, &'static dyn core::alloc::Allocator>>, + kmem_default_arena: Option, &'static dyn core::alloc::Allocator>>, +} + +impl VmemAlloc { + // TODO Specify quantum caching + pub fn new( + early_allocator: &'static dyn core::alloc::Allocator, + heap_range: VirtRange, + ) -> Self { + let heap_arena = Arc::new_in( + Lock::new( + "heap_arena", + Arena::new_with_allocator( + "heap", + Some(Boundary::from(heap_range)), + PAGE_SIZE_4K, + early_allocator, + ), + ), + early_allocator, + ); + + // va_arena imports from heap_arena, so can use allocations from that heap to + // allocate blocks of tags. + let va_arena = Arc::new_in( + Lock::new( + "kmem_va", + Arena::new("kmem_va_arena", None, PAGE_SIZE_4K, Some(heap_arena.clone())), + ), + early_allocator, + ); + + // kmem_default_arena - backing store for most object caches + let kmem_default_arena = Arc::new_in( + Lock::new( + "kmem_default_arena", + Arena::new("kmem_default", None, PAGE_SIZE_4K, Some(va_arena.clone())), + ), + early_allocator, + ); + + Self { heap_arena, va_arena: Some(va_arena), kmem_default_arena: Some(kmem_default_arena) } + } + + /// Create the remaining early arenas. To be called immediately after new() + /// as it uses self as the allocator. + pub fn init(&self) { + // va_arena imports from heap_arena, so can use allocations from that heap to + // allocate blocks of tags. 
+ let va_arena = Arc::new_in( + Lock::new( + "kmem_va", + Arena::new("kmem_va_arena", None, PAGE_SIZE_4K, Some(self.heap_arena.clone())), + ), + self, + ); + + // kmem_default_arena - backing store for most object caches + // let kmem_default_arena = Arc::new_in( + // Lock::new( + // "kmem_default_arena", + // Arena::new("kmem_default", None, PAGE_SIZE_4K, Some(va_arena.clone())), + // ), + // self, + // ); + //self.va_arena = Some(va_arena as Allocator); + } + + pub fn alloc(&self, layout: Layout) -> *mut u8 { + let node = LockNode::new(); + let mut guard = self + .kmem_default_arena + .as_deref() + .expect("kmem_default_arena not yet created") + .lock(&node); + // TODO use layout properly + guard.alloc(layout.size()) + } +} + +unsafe impl core::alloc::Allocator for VmemAlloc { + fn allocate( + &self, + layout: Layout, + ) -> Result, core::alloc::AllocError> { + let bytes = self.alloc(layout); + if bytes.is_null() { + Err(AllocError {}) + } else { + let nonnull_bytes_ptr = NonNull::new(bytes).unwrap(); + Ok(NonNull::slice_from_raw_parts(nonnull_bytes_ptr, layout.size())) + } + } + + unsafe fn deallocate(&self, _ptr: core::ptr::NonNull, _layout: Layout) { + todo!() + } +} + +#[cfg(test)] +mod tests { + + use crate::bumpalloc::Bump; + + use super::*; + + #[test] + fn alloc_with_importing() { + static BUMP_ALLOC: Bump<{ 4 * PAGE_SIZE_4K }, PAGE_SIZE_4K> = Bump::new(0); + let vmalloc = + VmemAlloc::new(&BUMP_ALLOC, VirtRange::with_len(0xffff800000800000, 0x1000000)); + vmalloc.init(); + let b = vmalloc.alloc(unsafe { Layout::from_size_align_unchecked(1024, 1) }); + assert_ne!(b, 0 as *mut u8); + } +} diff --git a/riscv64/Cargo.toml b/riscv64/Cargo.toml index 2ea5ad9..7a81798 100644 --- a/riscv64/Cargo.toml +++ b/riscv64/Cargo.toml @@ -11,3 +11,8 @@ port = { path = "../port" } [features] opensbi = [] + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(platform, values("nezha", "virt"))', +] } diff --git a/riscv64/src/main.rs b/riscv64/src/main.rs index 19f1621..8551859 100644 --- a/riscv64/src/main.rs +++ b/riscv64/src/main.rs @@ -1,6 +1,4 @@ #![feature(alloc_error_handler)] -#![feature(asm_const)] -#![feature(panic_info_message)] #![cfg_attr(not(any(test)), no_std)] #![cfg_attr(not(test), no_main)] #![allow(clippy::upper_case_acronyms)] diff --git a/riscv64/src/runtime.rs b/riscv64/src/runtime.rs index 78eef68..d228373 100644 --- a/riscv64/src/runtime.rs +++ b/riscv64/src/runtime.rs @@ -18,7 +18,7 @@ extern "C" fn eh_personality() {} fn panic(info: &PanicInfo) -> ! 
{ print!("Panic: "); if let Some(p) = info.location() { - println!("line {}, file {}: {}", p.line(), p.file(), info.message().unwrap()); + println!("line {}, file {}: {}", p.line(), p.file(), info.message()); } else { println!("no information available."); } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index edc3dd8..a179883 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,8 +1,8 @@ [toolchain] -channel = "nightly-2024-06-08" -components = [ "rustfmt", "rust-src", "clippy", "llvm-tools" ] +channel = "nightly-2024-09-08" +components = ["rustfmt", "rust-src", "clippy", "llvm-tools"] targets = [ "aarch64-unknown-none", "riscv64gc-unknown-none-elf", - "x86_64-unknown-none" + "x86_64-unknown-none", ] diff --git a/x86_64/src/main.rs b/x86_64/src/main.rs index cd85932..ebd4a42 100644 --- a/x86_64/src/main.rs +++ b/x86_64/src/main.rs @@ -1,5 +1,4 @@ #![feature(alloc_error_handler)] -#![feature(asm_const)] #![feature(naked_functions)] #![feature(sync_unsafe_cell)] #![cfg_attr(not(any(test)), no_std)]
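
How the pieces fit together at boot - a condensed sketch drawn from aarch64/src/init.rs and aarch64/src/vmalloc.rs above (BUMP_ALLOC and heap_virtrange() are the ones defined in this change):

    // Boot-time wiring of the new allocators, condensed from init.rs/vmalloc.rs:
    static BUMP_ALLOC: Bump<{ 4 * PAGE_SIZE_4K }, PAGE_SIZE_4K> = Bump::new(0);

    pub fn init(dtb_va: usize) {
        init_pre_allocator(dtb_va); // traps, UART, page tables - no heap yet
        // The static Bump allocator supplies VmemAlloc's first pages of boundary tags.
        vmalloc::init(&BUMP_ALLOC, heap_virtrange());
        // From here the #[global_allocator] in runtime.rs (vmalloc::Allocator) is live,
        // so Box/Vec allocations are served from VmemAlloc's kmem_default arena.
        let _b = Box::new("ddododo");
    }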