From 4a11bbb98046430d8de2bb46f0364f3ba542310a Mon Sep 17 00:00:00 2001
From: Graham MacDonald
Date: Wed, 25 Sep 2024 22:15:22 +0100
Subject: [PATCH] wip

Signed-off-by: Graham MacDonald
---
 aarch64/src/init.rs    |  17 +++---
 aarch64/src/runtime.rs |  20 ++-----
 aarch64/src/vmalloc.rs |  88 ++++++++++-----------------
 port/src/lib.rs        |   1 +
 port/src/vmem.rs       |  16 +++--
 port/src/vmemalloc.rs  | 131 +++++++++++++++++++++++++++++++++++++++++
 rust-toolchain.toml    |   2 +-
 7 files changed, 182 insertions(+), 93 deletions(-)
 create mode 100644 port/src/vmemalloc.rs

diff --git a/aarch64/src/init.rs b/aarch64/src/init.rs
index 34aa0bf..ae05f8c 100644
--- a/aarch64/src/init.rs
+++ b/aarch64/src/init.rs
@@ -11,7 +11,6 @@ use crate::vm::kernel_root;
 use crate::vm::PageTable;
 use crate::vmalloc;
 use alloc::boxed::Box;
-use core::alloc::Layout;
 use core::ptr;
 use port::bumpalloc::Bump;
 use port::fdt::DeviceTree;
@@ -130,14 +129,14 @@ pub fn init(dtb_va: usize) {
 
     println!("looping now");
 
-    {
-        let test = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 16) });
-        println!("test alloc: {:p}", test);
-        let test2 = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 16) });
-        println!("test alloc: {:p}", test2);
-        let test3 = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 4096) });
-        println!("test alloc: {:p}", test3);
-    }
+    // {
+    //     let test = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 16) });
+    //     println!("test alloc: {:p}", test);
+    //     let test2 = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 16) });
+    //     println!("test alloc: {:p}", test2);
+    //     let test3 = vmalloc::alloc(unsafe { Layout::from_size_align_unchecked(1024, 4096) });
+    //     println!("test alloc: {:p}", test3);
+    // }
 
     #[allow(clippy::empty_loop)]
     loop {}
diff --git a/aarch64/src/runtime.rs b/aarch64/src/runtime.rs
index b844348..665b4ad 100644
--- a/aarch64/src/runtime.rs
+++ b/aarch64/src/runtime.rs
@@ -6,12 +6,15 @@ use crate::kmem::physaddr_as_virt;
 use crate::registers::rpi_mmio;
 use crate::uartmini::MiniUart;
 use crate::vmalloc;
-use alloc::alloc::{GlobalAlloc, Layout};
+use alloc::alloc::Layout;
 use core::fmt::Write;
 use core::panic::PanicInfo;
 use port::devcons::PanicConsole;
 use port::mem::VirtRange;
 
+#[global_allocator]
+static ALLOCATOR: vmalloc::Allocator = vmalloc::Allocator {};
+
 // TODO
 // - Add qemu integration test
 // - Use Console via println!() macro once available
@@ -40,18 +43,3 @@ pub fn panic(info: &PanicInfo) -> ! {
 fn oom(_layout: Layout) -> ! {
     panic!("oom");
 }
-
-struct Allocator {}
-
-unsafe impl GlobalAlloc for Allocator {
-    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        vmalloc::alloc(layout)
-    }
-
-    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
-        panic!("fake dealloc");
-    }
-}
-
-#[global_allocator]
-static ALLOCATOR: Allocator = Allocator {};
diff --git a/aarch64/src/vmalloc.rs b/aarch64/src/vmalloc.rs
index 7860b91..438a06c 100644
--- a/aarch64/src/vmalloc.rs
+++ b/aarch64/src/vmalloc.rs
@@ -1,76 +1,48 @@
-use alloc::sync::Arc;
-use core::{alloc::Layout, mem::MaybeUninit};
+use alloc::alloc::{GlobalAlloc, Layout};
+use core::mem::MaybeUninit;
 use port::{
     mcslock::{Lock, LockNode},
-    mem::{VirtRange, PAGE_SIZE_4K},
-    vmem::{Allocator, Arena, Boundary},
+    mem::VirtRange,
+    vmemalloc::VmemAlloc,
 };
 
+#[cfg(not(test))]
+use port::println;
+
 // TODO replace with some sort of OnceLock? We need this to be dynamically created,
 // but we're assuming VmAlloc is Sync.
-static VMALLOC: Lock<Option<&'static mut VmAlloc>> = Lock::new("vmalloc", None);
-
-// The core arenas are statically allocated. They cannot be created in const
-// functions, so the we declare them as MaybeUninit before intialising and
-// referening them from VmAlloc, from where they can be used in the global allocator.
-//static mut MAYBE_HEAP_ARENA: MaybeUninit = MaybeUninit::uninit();
-
-/// VmAlloc is an attempt to write a Bonwick vmem-style allocator. It currently
-/// expects another allocator to exist beforehand.
-/// TODO Use the allocator api trait.
-struct VmAlloc {
-    heap_arena: Arc<Lock<Arena>, &'static dyn core::alloc::Allocator>,
-    _va_arena: Arc<Lock<Arena>, &'static dyn core::alloc::Allocator>,
-}
-
-impl VmAlloc {
-    fn new(early_allocator: &'static dyn core::alloc::Allocator, heap_range: VirtRange) -> Self {
-        let heap_arena = Arc::new_in(
-            Lock::new(
-                "heap_arena",
-                Arena::new_with_allocator(
-                    "heap",
-                    Some(Boundary::from(heap_range)),
-                    PAGE_SIZE_4K,
-                    early_allocator,
-                ),
-            ),
-            early_allocator,
-        );
-
-        // va_arena imports from heap_arena, so can use allocations from that heap to
-        // allocate blocks of tags.
-        let va_arena = Arc::new_in(
-            Lock::new(
-                "kmem_va_arena",
-                Arena::new("kmem_va_arena", None, PAGE_SIZE_4K, Some(heap_arena.clone())),
-            ),
-            early_allocator,
-        );
-
-        Self { heap_arena, _va_arena: va_arena }
-    }
-}
+static VMEM_ALLOC: Lock<Option<&'static mut VmemAlloc>> = Lock::new("vmemalloc", None);
 
 pub fn init(early_allocator: &'static dyn core::alloc::Allocator, heap_range: VirtRange) {
     let node = LockNode::new();
-    let mut vmalloc = VMALLOC.lock(&node);
+    let mut vmalloc = VMEM_ALLOC.lock(&node);
     *vmalloc = Some({
-        static mut MAYBE_VMALLOC: MaybeUninit<VmAlloc> = MaybeUninit::uninit();
+        static mut MAYBE_VMALLOC: MaybeUninit<VmemAlloc> = MaybeUninit::uninit();
         unsafe {
-            MAYBE_VMALLOC.write(VmAlloc::new(early_allocator, heap_range));
+            MAYBE_VMALLOC.write({
+                let vmemalloc = VmemAlloc::new(early_allocator, heap_range);
+                vmemalloc.init();
+                vmemalloc
+            });
             MAYBE_VMALLOC.assume_init_mut()
         }
     });
 }
 
-pub fn alloc(layout: Layout) -> *mut u8 {
-    let node = LockNode::new();
-    let mut lock = VMALLOC.lock(&node);
-    let vmalloc = lock.as_deref_mut().unwrap();
+pub struct Allocator {}
 
-    let node = LockNode::new();
-    let mut guard = vmalloc.heap_arena.lock(&node);
-    // TODO use layout properly
-    guard.alloc(layout.size())
+unsafe impl GlobalAlloc for Allocator {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        println!("vmalloc::alloc");
+
+        // Get the main allocator
+        let node = LockNode::new();
+        let mut lock = VMEM_ALLOC.lock(&node);
+        let vmemalloc = lock.as_deref_mut().unwrap();
+        vmemalloc.alloc(layout)
+    }
+
+    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
+        panic!("fake dealloc");
+    }
 }
diff --git a/port/src/lib.rs b/port/src/lib.rs
index e5e17d6..fb0704f 100644
--- a/port/src/lib.rs
+++ b/port/src/lib.rs
@@ -14,5 +14,6 @@ pub mod fdt;
 pub mod mcslock;
 pub mod mem;
 pub mod vmem;
+pub mod vmemalloc;
 
 extern crate alloc;
diff --git a/port/src/vmem.rs b/port/src/vmem.rs
index ee16575..91c5c90 100644
--- a/port/src/vmem.rs
+++ b/port/src/vmem.rs
@@ -3,9 +3,6 @@ use crate::{mcslock::Lock, mem::PAGE_SIZE_4K};
 use alloc::sync::Arc;
 use core::{alloc::Layout, ops::Range, ptr::null_mut, slice};
 
-#[cfg(not(test))]
-use crate::println;
-
 // TODO reserve recursive area in vmem(?)
 // TODO Add hashtable for allocated tags - makes it faster when freeing, given only an address.
 // TODO Add support for quantum caches once we have slab allocators implemented.
@@ -311,10 +308,11 @@ impl Arena {
         quantum: usize,
         _parent: Option<Arc<Lock<Arena>, &dyn core::alloc::Allocator>>,
     ) -> Self {
-        println!("Arena::new name:{} initial_span:{:?} quantum:{:x}", name, initial_span, quantum);
+        // println!("Arena::new name:{} initial_span:{:?} quantum:{:x}", name, initial_span, quantum);
 
         let mut arena =
             Self { name, quantum, segment_list: TagList::new(), tag_pool: TagPool::new() };
+        //arena.add_tags_to_pool(tags);
 
         if let Some(span) = initial_span {
             arena.add_initial_span(span);
@@ -354,10 +352,10 @@ impl Arena {
         quantum: usize,
         tags: &mut [TagItem],
     ) -> Self {
-        println!(
-            "Arena::new_with_tags name:{} initial_span:{:?} quantum:{:x}",
-            name, initial_span, quantum
-        );
+        // println!(
+        //     "Arena::new_with_tags name:{} initial_span:{:?} quantum:{:x}",
+        //     name, initial_span, quantum
+        // );
 
         let mut arena =
             Self { name, quantum, segment_list: TagList::new(), tag_pool: TagPool::new() };
@@ -400,7 +398,7 @@ impl Arena {
 
     /// Allocate a segment, returned as a boundary
     fn alloc_segment(&mut self, size: usize) -> Result {
-        println!("alloc_segment size: {}", size);
+        // println!("alloc_segment size: {}", size);
 
         // Round size up to a multiple of quantum
         let size = {
diff --git a/port/src/vmemalloc.rs b/port/src/vmemalloc.rs
new file mode 100644
index 0000000..96b1125
--- /dev/null
+++ b/port/src/vmemalloc.rs
@@ -0,0 +1,131 @@
+use crate::{
+    mcslock::{Lock, LockNode},
+    mem::{VirtRange, PAGE_SIZE_4K},
+    vmem::{Allocator, Arena, Boundary},
+};
+use alloc::sync::Arc;
+use core::alloc::{AllocError, Layout};
+use core::ptr::NonNull;
+
+/// VmemAlloc is an attempt to write a Bonwick vmem-style allocator. It currently
+/// expects another allocator to exist beforehand.
+/// TODO Use the allocator api trait.
+pub struct VmemAlloc {
+    heap_arena: Arc<Lock<Arena>, &'static dyn core::alloc::Allocator>,
+    va_arena: Option<Arc<Lock<Arena>, &'static dyn core::alloc::Allocator>>,
+    kmem_default_arena: Option<Arc<Lock<Arena>, &'static dyn core::alloc::Allocator>>,
+}
+
+impl VmemAlloc {
+    // TODO Specify quantum caching
+    pub fn new(
+        early_allocator: &'static dyn core::alloc::Allocator,
+        heap_range: VirtRange,
+    ) -> Self {
+        let heap_arena = Arc::new_in(
+            Lock::new(
+                "heap_arena",
+                Arena::new_with_allocator(
+                    "heap",
+                    Some(Boundary::from(heap_range)),
+                    PAGE_SIZE_4K,
+                    early_allocator,
+                ),
+            ),
+            early_allocator,
+        );
+
+        // va_arena imports from heap_arena, so can use allocations from that heap to
+        // allocate blocks of tags.
+        let va_arena = Arc::new_in(
+            Lock::new(
+                "kmem_va",
+                Arena::new("kmem_va_arena", None, PAGE_SIZE_4K, Some(heap_arena.clone())),
+            ),
+            early_allocator,
+        );
+
+        // kmem_default_arena - backing store for most object caches
+        let kmem_default_arena = Arc::new_in(
+            Lock::new(
+                "kmem_default_arena",
+                Arena::new("kmem_default", None, PAGE_SIZE_4K, Some(va_arena.clone())),
+            ),
+            early_allocator,
+        );
+
+        Self { heap_arena, va_arena: Some(va_arena), kmem_default_arena: Some(kmem_default_arena) }
+    }
+
+    /// Create the remaining early arenas. To be called immediately after new()
+    /// as it uses self as the allocator.
+    pub fn init(&self) {
+        // va_arena imports from heap_arena, so can use allocations from that heap to
+        // allocate blocks of tags.
+        let va_arena = Arc::new_in(
+            Lock::new(
+                "kmem_va",
+                Arena::new("kmem_va_arena", None, PAGE_SIZE_4K, Some(self.heap_arena.clone())),
+            ),
+            self,
+        );
+
+        // kmem_default_arena - backing store for most object caches
+        // let kmem_default_arena = Arc::new_in(
+        //     Lock::new(
+        //         "kmem_default_arena",
+        //         Arena::new("kmem_default", None, PAGE_SIZE_4K, Some(va_arena.clone())),
+        //     ),
+        //     self,
+        // );
+        //self.va_arena = Some(va_arena as Allocator);
+    }
+
+    pub fn alloc(&self, layout: Layout) -> *mut u8 {
+        let node = LockNode::new();
+        let mut guard = self
+            .kmem_default_arena
+            .as_deref()
+            .expect("kmem_default_arena not yet created")
+            .lock(&node);
+        // TODO use layout properly
+        guard.alloc(layout.size())
+    }
+}
+
+unsafe impl core::alloc::Allocator for VmemAlloc {
+    fn allocate(
+        &self,
+        layout: Layout,
+    ) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
+        let bytes = self.alloc(layout);
+        if bytes.is_null() {
+            Err(AllocError {})
+        } else {
+            let nonnull_bytes_ptr = NonNull::new(bytes).unwrap();
+            Ok(NonNull::slice_from_raw_parts(nonnull_bytes_ptr, layout.size()))
+        }
+    }
+
+    unsafe fn deallocate(&self, _ptr: core::ptr::NonNull<u8>, _layout: Layout) {
+        todo!()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use crate::bumpalloc::Bump;
+
+    use super::*;
+
+    #[test]
+    fn alloc_with_importing() {
+        static BUMP_ALLOC: Bump<{ 4 * PAGE_SIZE_4K }, PAGE_SIZE_4K> = Bump::new(0);
+        let vmalloc =
+            VmemAlloc::new(&BUMP_ALLOC, VirtRange::with_len(0xffff800000800000, 0x1000000));
+        vmalloc.init();
+        let b = vmalloc.alloc(unsafe { Layout::from_size_align_unchecked(1024, 1) });
+        assert_ne!(b, 0 as *mut u8);
+    }
+}
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 43c30b6..a179883 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,5 +1,5 @@
 [toolchain]
-channel = "nightly-2024-08-27"
+channel = "nightly-2024-09-08"
 components = ["rustfmt", "rust-src", "clippy", "llvm-tools"]
 targets = [
     "aarch64-unknown-none",
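
Usage sketch, mirroring the unit test added in port/src/vmemalloc.rs: VmemAlloc::new() builds heap_arena over the supplied heap range, using an early bump allocator for arena tag storage; init() is meant to create the remaining importing arenas using self as the allocator (its body is still partly commented out in this patch); alloc() then satisfies requests from kmem_default_arena. The bump-allocator size, the heap range, and the wrapper function name below are illustrative values taken from or named for that test, not requirements of the allocator.

    use core::alloc::Layout;
    use port::{
        bumpalloc::Bump,
        mem::{PAGE_SIZE_4K, VirtRange},
        vmemalloc::VmemAlloc,
    };

    // Early backing store for arena metadata (tags), as in the test.
    static BUMP_ALLOC: Bump<{ 4 * PAGE_SIZE_4K }, PAGE_SIZE_4K> = Bump::new(0);

    fn vmemalloc_example() {
        // heap_arena covers the given kernel heap range.
        let vmemalloc =
            VmemAlloc::new(&BUMP_ALLOC, VirtRange::with_len(0xffff800000800000, 0x1000000));
        // Create the importing arenas (still work in progress in this patch).
        vmemalloc.init();
        // Allocations come from kmem_default_arena; alignment handling is still a TODO.
        let p = vmemalloc.alloc(Layout::from_size_align(1024, 16).unwrap());
        assert!(!p.is_null());
    }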