From 10b131dfb1a140e7c38702b9c56ba5a5f2980ce6 Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sat, 16 Sep 2023 18:11:08 +0100 Subject: [PATCH 01/12] Manually create pagetables Signed-off-by: Graham MacDonald --- Cargo.lock | 8 +- aarch64/Cargo.toml | 2 +- aarch64/lib/kernel.ld | 20 +- aarch64/src/devcons.rs | 3 +- aarch64/src/kalloc.rs | 49 +++++ aarch64/src/kmem.rs | 187 ++++++++++++++++ aarch64/src/l.S | 10 +- aarch64/src/mailbox.rs | 3 +- aarch64/src/main.rs | 32 ++- aarch64/src/mem.rs | 15 -- aarch64/src/registers.rs | 27 ++- aarch64/src/runtime.rs | 3 +- aarch64/src/vm.rs | 463 +++++++++++++++++++++++++++++++++++++++ port/src/fdt.rs | 4 + 14 files changed, 792 insertions(+), 34 deletions(-) create mode 100644 aarch64/src/kalloc.rs create mode 100644 aarch64/src/kmem.rs delete mode 100644 aarch64/src/mem.rs create mode 100644 aarch64/src/vm.rs diff --git a/Cargo.lock b/Cargo.lock index 4d4d84e..b9aa477 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -180,18 +180,18 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "num_enum" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" +checksum = "70bf6736f74634d299d00086f02986875b3c2d924781a6a2cb6c201e73da0ceb" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" +checksum = "56ea360eafe1022f7cc56cd7b869ed57330fb2453d0c7831d99b74c65d2f5597" dependencies = [ "proc-macro2", "quote", diff --git a/aarch64/Cargo.toml b/aarch64/Cargo.toml index bd8b9bf..e66c803 100644 --- a/aarch64/Cargo.toml +++ b/aarch64/Cargo.toml @@ -6,4 +6,4 @@ edition = "2021" [dependencies] bitstruct = "0.1" port = { path = "../port" } -num_enum = { version = "0.6.1", 
default-features = false } +num_enum = { version = "0.7.0", default-features = false } diff --git a/aarch64/lib/kernel.ld b/aarch64/lib/kernel.ld index 8a928ce..178f3b6 100644 --- a/aarch64/lib/kernel.ld +++ b/aarch64/lib/kernel.ld @@ -49,8 +49,26 @@ SECTIONS { *(COMMON) . = ALIGN(2097152); } - end = .; + ebss = .; + + /* Reserve section for kernel heap. Align to 2MiB to allow us to map as + a 2MiB page. */ + . = ALIGN(2 * 1024 * 1024); + heap = .; + . += 64 * 1024 * 1024; + eheap = .; + /* Reserve section for early pagetables. Align to 2MiB to allow us to map + as a 2MiB page.Note that this won't be needed once we transition to + recursive pagetables. */ + . = ALIGN(2 * 1024 * 1024); + early_pagetables = .; + . += 2 * 1024 * 1024; + eearly_pagetables = .; + + end = .; + PROVIDE(end = .); + /DISCARD/ : { *(.eh_frame .note.GNU-stack) } diff --git a/aarch64/src/devcons.rs b/aarch64/src/devcons.rs index 2608359..1cefc6c 100644 --- a/aarch64/src/devcons.rs +++ b/aarch64/src/devcons.rs @@ -1,5 +1,6 @@ // Racy to start. 
+use crate::param::KZERO; use crate::registers::rpi_mmio; use crate::uartmini::MiniUart; use core::mem::MaybeUninit; @@ -34,7 +35,7 @@ use port::mem::VirtRange; pub fn init(_dt: &DeviceTree) { Console::new(|| { - let mmio = rpi_mmio().expect("mmio base detect failed").to_virt(); + let mmio = rpi_mmio().expect("mmio base detect failed").to_virt_with_offset(KZERO); let gpio_range = VirtRange::with_len(mmio + 0x20_0000, 0xb4); let aux_range = VirtRange::with_len(mmio + 0x21_5000, 0x8); let miniuart_range = VirtRange::with_len(mmio + 0x21_5040, 0x40); diff --git a/aarch64/src/kalloc.rs b/aarch64/src/kalloc.rs new file mode 100644 index 0000000..304a006 --- /dev/null +++ b/aarch64/src/kalloc.rs @@ -0,0 +1,49 @@ +use crate::vm::{Page4K, PAGE_SIZE_4K}; +use core::ptr; +use port::mcslock::{Lock, LockNode}; + +static FREE_LIST: Lock = Lock::new("kmem", FreeList { next: None }); + +#[repr(align(4096))] +struct FreeList { + next: Option>, +} +unsafe impl Send for FreeList {} + +impl FreeList { + pub fn put(&mut self, page: &mut Page4K) { + let ptr = (page as *mut Page4K).addr(); + assert_eq!(ptr % PAGE_SIZE_4K, 0, "freeing unaligned page"); + page.scribble(); + let f = page as *mut Page4K as *mut FreeList; + unsafe { + ptr::write(f, FreeList { next: self.next }); + } + self.next = ptr::NonNull::new(f); + } + + pub fn get(&mut self) -> Option<&'static mut Page4K> { + let mut next = self.next?; + let next = unsafe { next.as_mut() }; + self.next = next.next; + let pg = unsafe { &mut *(next as *mut FreeList as *mut Page4K) }; + pg.clear(); + Some(pg) + } +} + +pub unsafe fn free_pages(pages: &mut [Page4K]) { + static mut NODE: LockNode = LockNode::new(); + let mut lock = FREE_LIST.lock(unsafe { &NODE }); + let fl = &mut *lock; + for page in pages.iter_mut() { + fl.put(page); + } +} + +pub fn alloc() -> Option<&'static mut Page4K> { + static mut NODE: LockNode = LockNode::new(); + let mut lock = FREE_LIST.lock(unsafe { &NODE }); + let fl = &mut *lock; + fl.get() +} diff --git 
a/aarch64/src/kmem.rs b/aarch64/src/kmem.rs new file mode 100644 index 0000000..8bcebde --- /dev/null +++ b/aarch64/src/kmem.rs @@ -0,0 +1,187 @@ +use crate::vm::Page4K; +use core::{ + fmt, + iter::{Step, StepBy}, + mem, + ops::{self, Range}, + slice, +}; + +// These map to definitions in kernel.ld +extern "C" { + static etext: [u64; 0]; + static erodata: [u64; 0]; + static ebss: [u64; 0]; + static early_pagetables: [u64; 0]; + static eearly_pagetables: [u64; 0]; + static heap: [u64; 0]; + static eheap: [u64; 0]; +} + +pub fn text_addr() -> usize { + 0xFFFF_8000_0000_0000 +} + +pub fn etext_addr() -> usize { + unsafe { etext.as_ptr().addr() } +} + +pub fn erodata_addr() -> usize { + unsafe { erodata.as_ptr().addr() } +} + +pub fn ebss_addr() -> usize { + unsafe { ebss.as_ptr().addr() } +} + +pub fn heap_addr() -> usize { + unsafe { heap.as_ptr().addr() } +} + +pub fn eheap_addr() -> usize { + unsafe { eheap.as_ptr().addr() } +} + +pub fn early_pagetables_addr() -> usize { + unsafe { early_pagetables.as_ptr().addr() } +} + +pub fn eearly_pagetables_addr() -> usize { + unsafe { eearly_pagetables.as_ptr().addr() } +} + +#[derive(Clone, Copy, PartialEq, PartialOrd)] +#[repr(transparent)] +pub struct PhysAddr(u64); + +impl PhysAddr { + pub const fn new(value: u64) -> Self { + PhysAddr(value) + } + + pub const fn addr(&self) -> u64 { + self.0 + } + + pub const fn to_virt_with_offset(&self, offset: usize) -> usize { + (self.0 as usize).wrapping_add(offset) + } + + pub fn from_offset_virt(a: usize, offset: usize) -> Self { + Self((a - offset) as u64) + } + + pub fn from_offset_ptr(a: *const T, offset: usize) -> Self { + Self::from_offset_virt(a.addr(), offset) + } + + pub const fn to_ptr_mut_with_offset(&self, offset: usize) -> *mut T { + self.to_virt_with_offset(offset) as *mut T + } + + pub const fn round_up(&self, step: u64) -> PhysAddr { + PhysAddr((self.0 + step - 1) & !(step - 1)) + } + + pub const fn round_down(&self, step: u64) -> PhysAddr { + PhysAddr(self.0 & 
!(step - 1)) + } + + pub fn step_by_rounded( + startpa: PhysAddr, + endpa: PhysAddr, + step_size: usize, + ) -> StepBy> { + let startpa = startpa.round_down(step_size as u64); + let endpa = endpa.round_up(step_size as u64); + (startpa..endpa).step_by(step_size) + } +} + +impl ops::Add for PhysAddr { + type Output = PhysAddr; + + fn add(self, offset: u64) -> PhysAddr { + PhysAddr(self.0 + offset) + } +} + +/// Note that this implementation will round down the startpa and round up the endpa +impl Step for PhysAddr { + fn steps_between(&startpa: &Self, &endpa: &Self) -> Option { + if startpa.0 <= endpa.0 { + match endpa.0.checked_sub(startpa.0) { + Some(result) => usize::try_from(result).ok(), + None => None, + } + } else { + None + } + } + + fn forward_checked(startpa: Self, count: usize) -> Option { + startpa.0.checked_add(count as u64).map(|x| PhysAddr(x)) + } + + fn backward_checked(startpa: Self, count: usize) -> Option { + startpa.0.checked_sub(count as u64).map(|x| PhysAddr(x)) + } +} + +impl fmt::Debug for PhysAddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "PhysAddr({:#016x})", self.0)?; + Ok(()) + } +} + +unsafe fn page_slice_mut<'a>(pstart: *mut Page4K, pend: *mut Page4K) -> &'a mut [Page4K] { + let ustart = pstart.addr(); + let uend = pend.addr(); + const PAGE_SIZE: usize = mem::size_of::(); + assert_eq!(ustart % PAGE_SIZE, 0, "page_slice_mut: unaligned start page"); + assert_eq!(uend % PAGE_SIZE, 0, "page_slice_mut: unaligned end page"); + assert!(ustart < uend, "page_slice_mut: bad range"); + + let len = (uend - ustart) / PAGE_SIZE; + unsafe { slice::from_raw_parts_mut(ustart as *mut Page4K, len) } +} + +pub fn early_pages() -> &'static mut [Page4K] { + let early_start = early_pagetables_addr() as *mut Page4K; + let early_end = eearly_pagetables_addr() as *mut Page4K; + unsafe { page_slice_mut(early_start, early_end) } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::vm; + + #[test] + fn physaddr_step() { + let 
startpa = PhysAddr::new(4096); + let endpa = PhysAddr::new(4096 * 3); + let pas = + PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_4K).collect::>(); + assert_eq!(pas, [PhysAddr::new(4096), PhysAddr::new(4096 * 2)]); + } + + #[test] + fn physaddr_step_rounds_up_and_down() { + let startpa = PhysAddr::new(9000); // Should round down to 8192 + let endpa = PhysAddr::new(5000 * 3); // Should round up to 16384 + let pas = + PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_4K).collect::>(); + assert_eq!(pas, [PhysAddr::new(4096 * 2), PhysAddr::new(4096 * 3)]); + } + + #[test] + fn physaddr_step_2m() { + let startpa = PhysAddr::new(0x3f000000); + let endpa = PhysAddr::new(0x3f000000 + 4 * 1024 * 1024); + let pas = + PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_2M).collect::>(); + assert_eq!(pas, [PhysAddr::new(0x3f000000), PhysAddr::new(0x3f000000 + 2 * 1024 * 1024)]); + } +} diff --git a/aarch64/src/l.S b/aarch64/src/l.S index b5d1c84..badc5ea 100644 --- a/aarch64/src/l.S +++ b/aarch64/src/l.S @@ -45,11 +45,11 @@ SCTLR_EL1 = (SCTLR_EL1_I|SCTLR_EL1_C|SCTLR_EL1_M) // Preset memory attributes. 
This register stores 8 8-bit presets that are // referenced by index in the page table entries: -// [0] 0x00 - Device (Non-gathering, non-reordering, no early write acknowledgement (most restrictive)) -// [1] 0xff - Normal -MAIR_EL1 = 0xff00 -PT_MAIR_DEVICE = (0<<2) // Use device memory attributes -PT_MAIR_NORMAL = (1<<2) // Use normal memory attributes +// [0] 0xff - Normal +// [1] 0x00 - Device (Non-gathering, non-reordering, no early write acknowledgement (most restrictive)) +MAIR_EL1 = 0x00ff +PT_MAIR_NORMAL = (0<<2) // Use normal memory attributes +PT_MAIR_DEVICE = (1<<2) // Use device memory attributes PT_PAGE = 3 // 4KiB granule PT_BLOCK = 1 // 2MiB granule diff --git a/aarch64/src/mailbox.rs b/aarch64/src/mailbox.rs index 34a9bed..24343b4 100644 --- a/aarch64/src/mailbox.rs +++ b/aarch64/src/mailbox.rs @@ -1,4 +1,5 @@ use crate::io::{read_reg, write_reg}; +use crate::param::KZERO; use crate::registers::rpi_mmio; use core::mem; use core::mem::MaybeUninit; @@ -24,7 +25,7 @@ pub fn init(_dt: &DeviceTree) { *mailbox = Some({ static mut MAYBE_MAILBOX: MaybeUninit = MaybeUninit::uninit(); unsafe { - let mmio = rpi_mmio().expect("mmio base detect failed").to_virt(); + let mmio = rpi_mmio().expect("mmio base detect failed").to_virt_with_offset(KZERO); let mbox_range = VirtRange::with_len(mmio + 0xb880, 0x40); MAYBE_MAILBOX.write(Mailbox { mbox_range }); diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs index 7de461c..fd760b6 100644 --- a/aarch64/src/main.rs +++ b/aarch64/src/main.rs @@ -3,26 +3,40 @@ #![cfg_attr(not(test), no_main)] #![feature(alloc_error_handler)] #![feature(asm_const)] +#![feature(core_intrinsics)] #![feature(stdsimd)] +#![feature(step_trait)] +#![feature(strict_provenance)] #![forbid(unsafe_op_in_unsafe_fn)] mod devcons; mod io; +mod kalloc; +mod kmem; mod mailbox; -mod mem; mod param; mod registers; mod trap; mod uartmini; mod uartpl011; +mod vm; use core::ffi::c_void; use port::fdt::DeviceTree; use port::println; +use vm::PageTable; + 
+use crate::kmem::PhysAddr; +use crate::param::KZERO; +use crate::vm::kernel_root; #[cfg(not(test))] core::arch::global_asm!(include_str!("l.S")); +type Result = core::result::Result; + +static mut KPGTBL: PageTable = PageTable::empty(); + unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_void) { let start = start as *const _ as u64; let end = end as *const _ as u64; @@ -93,7 +107,18 @@ fn print_board_info() { pub extern "C" fn main9(dtb_ptr: u64) { trap::init(); + // Parse the DTB before we set up memory so we can correctly map it let dt = unsafe { DeviceTree::from_u64(dtb_ptr).unwrap() }; + + unsafe { + kalloc::free_pages(kmem::early_pages()); + + let dtb_phys = PhysAddr::new(dtb_ptr); + let edtb_phys = dtb_phys + dt.size() as u64; + vm::init(&mut KPGTBL, dtb_phys, edtb_phys); + vm::switch(&KPGTBL); + } + mailbox::init(&dt); devcons::init(&dt); @@ -102,14 +127,17 @@ pub extern "C" fn main9(dtb_ptr: u64) { println!("DTB found at: {:#x}", dtb_ptr); println!("midr_el1: {:?}", registers::MidrEl1::read()); + println!("DT: {:p}", &dt); print_binary_sections(); print_physical_memory_map(); print_board_info(); + // Dump out pagetables + kernel_root().print_tables(KZERO); + println!("looping now"); #[allow(clippy::empty_loop)] loop {} } - mod runtime; diff --git a/aarch64/src/mem.rs b/aarch64/src/mem.rs deleted file mode 100644 index 0fe3275..0000000 --- a/aarch64/src/mem.rs +++ /dev/null @@ -1,15 +0,0 @@ -use crate::param::KZERO; - -#[repr(transparent)] -#[derive(Clone, Debug, PartialEq, PartialOrd)] -pub struct PhysAddr(u64); - -impl PhysAddr { - pub fn new(value: u64) -> Self { - PhysAddr(value) - } - - pub fn to_virt(&self) -> usize { - (self.0 as usize).wrapping_add(KZERO) - } -} diff --git a/aarch64/src/registers.rs b/aarch64/src/registers.rs index 9bd5987..5919510 100644 --- a/aarch64/src/registers.rs +++ b/aarch64/src/registers.rs @@ -1,11 +1,10 @@ -#![allow(dead_code)] +#![allow(non_upper_case_globals)] +use crate::kmem::PhysAddr; use 
bitstruct::bitstruct; use core::fmt; use num_enum::TryFromPrimitive; -use crate::mem::PhysAddr; - // GPIO registers pub const GPFSEL1: usize = 0x04; // GPIO function select register 1 pub const GPPUD: usize = 0x94; // GPIO pin pull up/down enable @@ -247,6 +246,16 @@ bitstruct! { } } +bitstruct! { + #[derive(Copy, Clone)] + pub struct Vaddr4K2M(pub u64) { + offset: u32 = 0..21; + l3idx: u16 = 21..30; + l2idx: u16 = 30..39; + l1idx: u16 = 39..48; + } +} + bitstruct! { #[derive(Copy, Clone)] pub struct Vaddr4K1G(pub u64) { @@ -297,6 +306,18 @@ mod tests { assert_eq!(va.l4idx(), 128); assert_eq!(va.offset(), 168); + let va = Vaddr4K2M(0xffff_8000_3f00_0000); + assert_eq!(va.l1idx(), 256); + assert_eq!(va.l2idx(), 0); + assert_eq!(va.l3idx(), 504); + assert_eq!(va.offset(), 0); + + let va = Vaddr4K2M(0xffff_8000_fe00_0000); + assert_eq!(va.l1idx(), 256); + assert_eq!(va.l2idx(), 3); + assert_eq!(va.l3idx(), 496); + assert_eq!(va.offset(), 0); + let va = Vaddr4K1G(0xffff_8000_0000_0000); assert_eq!(va.l1idx(), 256); assert_eq!(va.l2idx(), 0); diff --git a/aarch64/src/runtime.rs b/aarch64/src/runtime.rs index 331570e..d4a3e67 100644 --- a/aarch64/src/runtime.rs +++ b/aarch64/src/runtime.rs @@ -2,6 +2,7 @@ extern crate alloc; +use crate::param::KZERO; use crate::registers::rpi_mmio; use crate::uartmini::MiniUart; use alloc::alloc::{GlobalAlloc, Layout}; @@ -16,7 +17,7 @@ use port::mem::VirtRange; // - Add support for raspi4 #[panic_handler] pub extern "C" fn panic(info: &PanicInfo) -> ! 
{ - let mmio = rpi_mmio().expect("mmio base detect failed").to_virt(); + let mmio = rpi_mmio().expect("mmio base detect failed").to_virt_with_offset(KZERO); let gpio_range = VirtRange::with_len(mmio + 0x200000, 0xb4); let aux_range = VirtRange::with_len(mmio + 0x215000, 0x8); diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs new file mode 100644 index 0000000..f2931c3 --- /dev/null +++ b/aarch64/src/vm.rs @@ -0,0 +1,463 @@ +#![allow(non_upper_case_globals)] + +use crate::{ + kalloc, + kmem::{ + early_pagetables_addr, ebss_addr, eearly_pagetables_addr, eheap_addr, erodata_addr, + etext_addr, heap_addr, text_addr, PhysAddr, + }, + param::KZERO, + registers::rpi_mmio, + Result, +}; +use bitstruct::bitstruct; +use core::fmt; +use core::ptr::write_volatile; +use num_enum::{FromPrimitive, IntoPrimitive}; + +#[cfg(not(test))] +use port::println; + +pub const PAGE_SIZE_4K: usize = 4 * 1024; +pub const PAGE_SIZE_2M: usize = 2 * 1024 * 1024; +pub const PAGE_SIZE_1G: usize = 1 * 1024 * 1024 * 1024; + +#[derive(Debug, Clone, Copy)] +pub enum PageSize { + Page4K, + Page2M, + Page1G, +} + +impl PageSize { + const fn size(&self) -> usize { + match self { + PageSize::Page4K => PAGE_SIZE_4K, + PageSize::Page2M => PAGE_SIZE_2M, + PageSize::Page1G => PAGE_SIZE_1G, + } + } +} + +#[repr(C, align(4096))] +#[derive(Clone, Copy)] +pub struct Page4K([u8; PAGE_SIZE_4K]); + +impl Page4K { + pub fn clear(&mut self) { + unsafe { + core::intrinsics::volatile_set_memory(&mut self.0, 0u8, 1); + } + } + + pub fn scribble(&mut self) { + unsafe { + core::intrinsics::volatile_set_memory(self, 0b1010_1010u8, 1); + } + } +} + +#[derive(Debug, IntoPrimitive, FromPrimitive)] +#[repr(u8)] +pub enum Mair { + #[num_enum(default)] + Normal = 0, + Device = 1, +} + +#[derive(Debug, IntoPrimitive, FromPrimitive)] +#[repr(u8)] +pub enum AccessPermission { + #[num_enum(default)] + PrivRw = 0, + AllRw = 1, + PrivRo = 2, + AllRo = 3, +} + +#[derive(Debug, IntoPrimitive, FromPrimitive)] +#[repr(u8)] +pub enum 
Shareable { + #[num_enum(default)] + NonShareable = 0, // Non-shareable (single core) + Unpredictable = 1, // Unpredicatable! + OuterShareable = 2, // Outer shareable (shared across CPUs, GPU) + InnerShareable = 3, // Inner shareable (shared across CPUs) +} + +bitstruct! { + /// AArch64 supports various granule and page sizes. We assume 48-bit + /// addresses. This is documented in the 'Translation table descriptor + /// formats' section of the Arm Architecture Reference Manual. + /// The virtual address translation breakdown is documented in the 'Translation + /// Process' secrtion of the Arm Architecture Reference Manual. + #[derive(Copy, Clone, PartialEq)] + #[repr(transparent)] + pub struct Entry(u64) { + valid: bool = 0; + table: bool = 1; + mair_index: Mair = 2..5; + non_secure: bool = 5; + access_permission: AccessPermission = 6..8; + shareable: Shareable = 8..10; + accessed: bool = 10; // Was accessed by code + addr: u64 = 12..48; + pxn: bool = 53; // Privileged eXecute Never + uxn: bool = 54; // Unprivileged eXecute Never + } +} + +impl Entry { + fn new(pa: PhysAddr) -> Self { + Entry(0).with_addr(pa.addr() >> 12) + } + + pub const fn empty() -> Entry { + Entry(0) + } + + fn rw_kernel_data() -> Self { + Entry(0) + .with_shareable(Shareable::InnerShareable) + .with_accessed(true) + .with_uxn(true) + .with_pxn(true) + .with_mair_index(Mair::Normal) + } + + fn ro_kernel_data() -> Self { + Entry(0) + .with_access_permission(AccessPermission::PrivRo) + .with_shareable(Shareable::InnerShareable) + .with_accessed(true) + .with_uxn(true) + .with_pxn(true) + .with_mair_index(Mair::Normal) + } + + fn ro_kernel_text() -> Self { + Entry(0) + .with_access_permission(AccessPermission::PrivRw) + .with_shareable(Shareable::InnerShareable) + .with_accessed(true) + .with_uxn(true) + .with_pxn(false) + .with_mair_index(Mair::Normal) + } + + fn ro_kernel_device() -> Self { + Entry(0) + .with_access_permission(AccessPermission::PrivRw) + 
.with_shareable(Shareable::InnerShareable) + .with_accessed(true) + .with_uxn(true) + .with_pxn(true) + .with_mair_index(Mair::Device) + } + + const fn with_phys_addr(self, pa: PhysAddr) -> Self { + Entry(self.0).with_addr(pa.addr() >> 12) + } + + fn phys_page_addr(self) -> PhysAddr { + PhysAddr::new(self.addr() << 12) + } + + fn virt_page_addr_with_offset(self, offset: usize) -> usize { + self.phys_page_addr().to_virt_with_offset(offset) + } +} + +impl fmt::Debug for Entry { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Entry: {:#x} ", self.addr() << 12)?; + if self.valid() { + write!(f, " Valid")?; + } else { + write!(f, " Invalid")?; + } + if self.table() { + write!(f, " Table")?; + } else { + write!(f, " Page")?; + } + write!(f, " {:?}", self.mair_index())?; + if self.non_secure() { + write!(f, " NonSecure")?; + } else { + write!(f, " Secure")?; + } + write!(f, " {:?} {:?}", self.access_permission(), self.shareable())?; + if self.accessed() { + write!(f, " Accessed")?; + } + if self.pxn() { + write!(f, " PXN")?; + } + if self.uxn() { + write!(f, " UXN")?; + } + Ok(()) + } +} + +/// Levels start at the lowest number (most significant) and increase from +/// there. Four levels would support (for example) 4kiB granules with 4KiB +/// pages using Level0 - Level3, while three would support 2MiB pages with the +/// same size granules, using only Level0 - Level2. 
+#[derive(Debug, Clone, Copy)] +pub enum Level { + Level0, + Level1, + Level2, + Level3, +} + +impl Level { + /// Returns the next level to translate + pub fn next(&self) -> Option { + match self { + Level::Level0 => Some(Level::Level1), + Level::Level1 => Some(Level::Level2), + Level::Level2 => Some(Level::Level3), + Level::Level3 => None, + } + } + + pub fn depth(&self) -> usize { + match self { + Level::Level0 => 0, + Level::Level1 => 1, + Level::Level2 => 2, + Level::Level3 => 3, + } + } +} + +#[repr(C, align(4096))] +pub struct Table { + entries: [Entry; 512], +} + +impl Table { + fn index(level: Level, va: usize) -> usize { + match level { + Level::Level0 => (va >> 39) & 0x1FF, + Level::Level1 => (va >> 30) & 0x1FF, + Level::Level2 => (va >> 21) & 0x1FF, + Level::Level3 => (va >> 12) & 0x1FF, + } + } + + pub fn entry_mut(&mut self, level: Level, va: usize) -> Option<&mut Entry> { + Some(&mut self.entries[Self::index(level, va)]) + } + + fn child_table(&self, entry: Entry, kern_offset: usize) -> Option<&Table> { + if !entry.valid() { + return None; + } + let raw_ptr = entry.virt_page_addr_with_offset(kern_offset); + Some(unsafe { &*(raw_ptr as *const Table) }) + } + + fn next(&self, level: Level, va: usize, kern_offset: usize) -> Option<&Table> { + let idx = Self::index(level, va); + let entry = self.entries[idx]; + self.child_table(entry, kern_offset) + } + + fn next_mut(&mut self, level: Level, va: usize, kern_offset: usize) -> Option<&mut Table> { + let index = Self::index(level, va); + let mut entry = self.entries[index]; + // println!("next_mut(level:{:?}, va:{:016x}, index:{}): entry:{:?}", level, va, index, entry); + if !entry.valid() { + let page = kalloc::alloc()?; + page.clear(); + entry = Entry::new(PhysAddr::from_offset_ptr(page, kern_offset)) + .with_valid(true) + .with_table(true); + unsafe { + write_volatile(&mut self.entries[index], entry); + } + } + let raw_ptr = entry.virt_page_addr_with_offset(kern_offset); + let next_table = unsafe { &mut 
*(raw_ptr as *mut Table) }; + Some(next_table) + } +} + +pub type PageTable = Table; + +impl PageTable { + pub const fn empty() -> PageTable { + PageTable { entries: [Entry::empty(); 512] } + } + + pub fn map_to( + &mut self, + entry: Entry, + va: usize, + page_size: PageSize, + kern_offset: usize, + ) -> Result<()> { + // println!("map_to(entry: {:?}, va: {:#x}, page_size {:?})", entry, va, page_size); + let old_entry = match page_size { + PageSize::Page4K => self + .next_mut(Level::Level0, va, kern_offset) + .and_then(|t1| t1.next_mut(Level::Level1, va, kern_offset)) + .and_then(|t2| t2.next_mut(Level::Level2, va, kern_offset)) + .and_then(|t3| t3.entry_mut(Level::Level3, va)), + PageSize::Page2M => self + .next_mut(Level::Level0, va, kern_offset) + .and_then(|t1| t1.next_mut(Level::Level1, va, kern_offset)) + .and_then(|t2| t2.entry_mut(Level::Level2, va)), + PageSize::Page1G => self + .next_mut(Level::Level0, va, kern_offset) + .and_then(|t1| t1.entry_mut(Level::Level1, va)), + }; + + if let Some(old_entry) = old_entry { + let entry = entry.with_valid(true); + // println!("Some {:?}, New {:?}", old_entry, entry); + // println!("{:p}", old_entry); + unsafe { + write_volatile(old_entry, entry); + } + return Ok(()); + } + Err("Allocation failed") + } + + pub fn map_phys_range( + &mut self, + start: PhysAddr, + end: PhysAddr, + entry: Entry, + page_size: PageSize, + kern_offset: usize, + ) -> Result<()> { + for pa in PhysAddr::step_by_rounded(start, end, page_size.size()) { + self.map_to( + entry.with_phys_addr(pa), + pa.to_virt_with_offset(kern_offset), + page_size, + kern_offset, + )?; + } + Ok(()) + } + + /// Recursively write out the table and all its children + pub fn print_tables(&self, kern_offset: usize) { + println!("Root va:{:p}", self); + self.print_table_at_level(Level::Level0, kern_offset); + } + + /// Recursively write out the table and all its children + fn print_table_at_level(&self, level: Level, kern_offset: usize) { + let indent = 2 + 
level.depth() * 2; + for (i, &pte) in self.entries.iter().enumerate() { + if pte.valid() { + print_pte(indent, i, pte, kern_offset); + + if pte.table() { + if let Some(child_table) = self.child_table(pte, kern_offset) { + child_table.print_table_at_level(level.next().unwrap(), kern_offset); + } + } + } + } + } +} + +impl fmt::Debug for PageTable { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:x}", (self as *const Self).addr()) + } +} + +/// Helper to print out PTE as part of a table +fn print_pte(indent: usize, i: usize, pte: Entry, kern_offset: usize) { + println!( + "{:indent$}[{:03}] va:{:#016x} -> pa:({:?}) (pte:{:#016x})", + "", + i, + pte.virt_page_addr_with_offset(kern_offset), + pte, + pte.0, + ); +} + +pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: PhysAddr) { + //use PageFlags as PF; + + let text_phys = PhysAddr::from_offset_virt(text_addr(), KZERO); + let etext_phys = PhysAddr::from_offset_virt(etext_addr(), KZERO); + let erodata_phys = PhysAddr::from_offset_virt(erodata_addr(), KZERO); + let ebss_phys = PhysAddr::from_offset_virt(ebss_addr(), KZERO); + let heap_phys = PhysAddr::from_offset_virt(heap_addr(), KZERO); + let eheap_phys = PhysAddr::from_offset_virt(eheap_addr(), KZERO); + let early_pagetables_phys = PhysAddr::from_offset_virt(early_pagetables_addr(), KZERO); + let eearly_pagetables_phys = PhysAddr::from_offset_virt(eearly_pagetables_addr(), KZERO); + + let mmio = rpi_mmio().expect("mmio base detect failed"); + let mmio_end = PhysAddr::from(mmio + (2 * PAGE_SIZE_2M as u64)); + + let custom_map = [ + // TODO We don't actualy unmap the first page... We should to achieve: + // Note that the first page is left unmapped to try and + // catch null pointer dereferences in unsafe code: defense + // in depth! 
+ + // DTB + (dtb_phys, edtb_phys, Entry::ro_kernel_data(), PageSize::Page4K), + // Kernel text + (text_phys, etext_phys, Entry::ro_kernel_text(), PageSize::Page2M), + // Kernel read-only data + (etext_phys, erodata_phys, Entry::ro_kernel_data(), PageSize::Page2M), + // Kernel BSS + (erodata_phys, ebss_phys, Entry::rw_kernel_data(), PageSize::Page2M), + // Kernel heap + (heap_phys, eheap_phys, Entry::rw_kernel_data(), PageSize::Page2M), + // Page tables + (early_pagetables_phys, eearly_pagetables_phys, Entry::rw_kernel_data(), PageSize::Page2M), + // MMIO + (mmio, mmio_end, Entry::ro_kernel_device(), PageSize::Page2M), + ]; + + for (start, end, flags, page_size) in custom_map.iter() { + kpage_table + .map_phys_range(*start, *end, *flags, *page_size, KZERO) + .expect("init mapping failed"); + } +} + +/// Return the root kernel page table physical address +fn ttbr1_el1() -> u64 { + let mut addr: u64; + unsafe { + core::arch::asm!("mrs {value}, ttbr1_el1", value = out(reg) addr); + } + addr +} + +pub unsafe fn switch(kpage_table: &PageTable) { + #[cfg(not(test))] + unsafe { + let pt_phys = PhysAddr::from_offset_ptr(kpage_table, KZERO).addr(); + core::arch::asm!( + "msr ttbr1_el1, {pt_phys}", + "dsb ish", + "isb", + pt_phys = in(reg) pt_phys); + } +} + +/// Return the root kernel page table +pub fn kernel_root() -> &'static mut PageTable { + unsafe { + let ttbr1_el1 = ttbr1_el1(); + &mut *PhysAddr::new(ttbr1_el1).to_ptr_mut_with_offset::(KZERO) + } +} diff --git a/port/src/fdt.rs b/port/src/fdt.rs index ab097f0..e4f7d77 100644 --- a/port/src/fdt.rs +++ b/port/src/fdt.rs @@ -63,6 +63,10 @@ impl<'a> DeviceTree<'a> { FdtHeader::new(uninit_data, false).map(|header| Self { data: uninit_data, header }) } + pub fn size(&self) -> usize { + self.header.totalsize as usize + } + /// Given a pointer to the dtb as a u64, return a DeviceTree struct. 
pub unsafe fn from_u64(ptr: u64) -> Result { let u8ptr = ptr as *const mem::MaybeUninit; From 8e65d5b3b7073f94ce41b824c15934cc7031d3fa Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sat, 16 Sep 2023 18:11:17 +0100 Subject: [PATCH 02/12] Tidy Signed-off-by: Graham MacDonald --- aarch64/src/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs index fd760b6..de02375 100644 --- a/aarch64/src/main.rs +++ b/aarch64/src/main.rs @@ -127,7 +127,6 @@ pub extern "C" fn main9(dtb_ptr: u64) { println!("DTB found at: {:#x}", dtb_ptr); println!("midr_el1: {:?}", registers::MidrEl1::read()); - println!("DT: {:p}", &dt); print_binary_sections(); print_physical_memory_map(); print_board_info(); From 5d09d0bad14d67b982590d0d7ec70a7d016a767a Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sat, 16 Sep 2023 18:17:43 +0100 Subject: [PATCH 03/12] Remove offset bits Signed-off-by: Graham MacDonald --- aarch64/src/devcons.rs | 3 +- aarch64/src/kmem.rs | 18 ++++---- aarch64/src/mailbox.rs | 3 +- aarch64/src/main.rs | 8 ++-- aarch64/src/runtime.rs | 3 +- aarch64/src/vm.rs | 93 +++++++++++++++++------------------------- 6 files changed, 53 insertions(+), 75 deletions(-) diff --git a/aarch64/src/devcons.rs b/aarch64/src/devcons.rs index 1cefc6c..2608359 100644 --- a/aarch64/src/devcons.rs +++ b/aarch64/src/devcons.rs @@ -1,6 +1,5 @@ // Racy to start. 
-use crate::param::KZERO; use crate::registers::rpi_mmio; use crate::uartmini::MiniUart; use core::mem::MaybeUninit; @@ -35,7 +34,7 @@ use port::mem::VirtRange; pub fn init(_dt: &DeviceTree) { Console::new(|| { - let mmio = rpi_mmio().expect("mmio base detect failed").to_virt_with_offset(KZERO); + let mmio = rpi_mmio().expect("mmio base detect failed").to_virt(); let gpio_range = VirtRange::with_len(mmio + 0x20_0000, 0xb4); let aux_range = VirtRange::with_len(mmio + 0x21_5000, 0x8); let miniuart_range = VirtRange::with_len(mmio + 0x21_5040, 0x40); diff --git a/aarch64/src/kmem.rs b/aarch64/src/kmem.rs index 8bcebde..4741ca2 100644 --- a/aarch64/src/kmem.rs +++ b/aarch64/src/kmem.rs @@ -1,4 +1,4 @@ -use crate::vm::Page4K; +use crate::{param::KZERO, vm::Page4K}; use core::{ fmt, iter::{Step, StepBy}, @@ -63,20 +63,20 @@ impl PhysAddr { self.0 } - pub const fn to_virt_with_offset(&self, offset: usize) -> usize { - (self.0 as usize).wrapping_add(offset) + pub const fn to_virt(&self) -> usize { + (self.0 as usize).wrapping_add(KZERO) } - pub fn from_offset_virt(a: usize, offset: usize) -> Self { - Self((a - offset) as u64) + pub fn from_virt(a: usize) -> Self { + Self((a - KZERO) as u64) } - pub fn from_offset_ptr(a: *const T, offset: usize) -> Self { - Self::from_offset_virt(a.addr(), offset) + pub fn from_ptr(a: *const T) -> Self { + Self::from_virt(a.addr()) } - pub const fn to_ptr_mut_with_offset(&self, offset: usize) -> *mut T { - self.to_virt_with_offset(offset) as *mut T + pub const fn to_ptr_mut(&self) -> *mut T { + self.to_virt() as *mut T } pub const fn round_up(&self, step: u64) -> PhysAddr { diff --git a/aarch64/src/mailbox.rs b/aarch64/src/mailbox.rs index 24343b4..34a9bed 100644 --- a/aarch64/src/mailbox.rs +++ b/aarch64/src/mailbox.rs @@ -1,5 +1,4 @@ use crate::io::{read_reg, write_reg}; -use crate::param::KZERO; use crate::registers::rpi_mmio; use core::mem; use core::mem::MaybeUninit; @@ -25,7 +24,7 @@ pub fn init(_dt: &DeviceTree) { *mailbox = Some({ 
static mut MAYBE_MAILBOX: MaybeUninit = MaybeUninit::uninit(); unsafe { - let mmio = rpi_mmio().expect("mmio base detect failed").to_virt_with_offset(KZERO); + let mmio = rpi_mmio().expect("mmio base detect failed").to_virt(); let mbox_range = VirtRange::with_len(mmio + 0xb880, 0x40); MAYBE_MAILBOX.write(Mailbox { mbox_range }); diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs index de02375..862f70f 100644 --- a/aarch64/src/main.rs +++ b/aarch64/src/main.rs @@ -21,15 +21,13 @@ mod uartmini; mod uartpl011; mod vm; +use crate::kmem::PhysAddr; +use crate::vm::kernel_root; use core::ffi::c_void; use port::fdt::DeviceTree; use port::println; use vm::PageTable; -use crate::kmem::PhysAddr; -use crate::param::KZERO; -use crate::vm::kernel_root; - #[cfg(not(test))] core::arch::global_asm!(include_str!("l.S")); @@ -132,7 +130,7 @@ pub extern "C" fn main9(dtb_ptr: u64) { print_board_info(); // Dump out pagetables - kernel_root().print_tables(KZERO); + kernel_root().print_tables(); println!("looping now"); diff --git a/aarch64/src/runtime.rs b/aarch64/src/runtime.rs index d4a3e67..331570e 100644 --- a/aarch64/src/runtime.rs +++ b/aarch64/src/runtime.rs @@ -2,7 +2,6 @@ extern crate alloc; -use crate::param::KZERO; use crate::registers::rpi_mmio; use crate::uartmini::MiniUart; use alloc::alloc::{GlobalAlloc, Layout}; @@ -17,7 +16,7 @@ use port::mem::VirtRange; // - Add support for raspi4 #[panic_handler] pub extern "C" fn panic(info: &PanicInfo) -> ! 
{ - let mmio = rpi_mmio().expect("mmio base detect failed").to_virt_with_offset(KZERO); + let mmio = rpi_mmio().expect("mmio base detect failed").to_virt(); let gpio_range = VirtRange::with_len(mmio + 0x200000, 0xb4); let aux_range = VirtRange::with_len(mmio + 0x215000, 0x8); diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index f2931c3..d861db0 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -6,7 +6,6 @@ use crate::{ early_pagetables_addr, ebss_addr, eearly_pagetables_addr, eheap_addr, erodata_addr, etext_addr, heap_addr, text_addr, PhysAddr, }, - param::KZERO, registers::rpi_mmio, Result, }; @@ -163,8 +162,8 @@ impl Entry { PhysAddr::new(self.addr() << 12) } - fn virt_page_addr_with_offset(self, offset: usize) -> usize { - self.phys_page_addr().to_virt_with_offset(offset) + fn virt_page_addr(self) -> usize { + self.phys_page_addr().to_virt() } } @@ -253,35 +252,33 @@ impl Table { Some(&mut self.entries[Self::index(level, va)]) } - fn child_table(&self, entry: Entry, kern_offset: usize) -> Option<&Table> { + fn child_table(&self, entry: Entry) -> Option<&Table> { if !entry.valid() { return None; } - let raw_ptr = entry.virt_page_addr_with_offset(kern_offset); + let raw_ptr = entry.virt_page_addr(); Some(unsafe { &*(raw_ptr as *const Table) }) } - fn next(&self, level: Level, va: usize, kern_offset: usize) -> Option<&Table> { + fn next(&self, level: Level, va: usize) -> Option<&Table> { let idx = Self::index(level, va); let entry = self.entries[idx]; - self.child_table(entry, kern_offset) + self.child_table(entry) } - fn next_mut(&mut self, level: Level, va: usize, kern_offset: usize) -> Option<&mut Table> { + fn next_mut(&mut self, level: Level, va: usize) -> Option<&mut Table> { let index = Self::index(level, va); let mut entry = self.entries[index]; // println!("next_mut(level:{:?}, va:{:016x}, index:{}): entry:{:?}", level, va, index, entry); if !entry.valid() { let page = kalloc::alloc()?; page.clear(); - entry = 
Entry::new(PhysAddr::from_offset_ptr(page, kern_offset)) - .with_valid(true) - .with_table(true); + entry = Entry::new(PhysAddr::from_ptr(page)).with_valid(true).with_table(true); unsafe { write_volatile(&mut self.entries[index], entry); } } - let raw_ptr = entry.virt_page_addr_with_offset(kern_offset); + let raw_ptr = entry.virt_page_addr(); let next_table = unsafe { &mut *(raw_ptr as *mut Table) }; Some(next_table) } @@ -294,27 +291,21 @@ impl PageTable { PageTable { entries: [Entry::empty(); 512] } } - pub fn map_to( - &mut self, - entry: Entry, - va: usize, - page_size: PageSize, - kern_offset: usize, - ) -> Result<()> { + pub fn map_to(&mut self, entry: Entry, va: usize, page_size: PageSize) -> Result<()> { // println!("map_to(entry: {:?}, va: {:#x}, page_size {:?})", entry, va, page_size); let old_entry = match page_size { PageSize::Page4K => self - .next_mut(Level::Level0, va, kern_offset) - .and_then(|t1| t1.next_mut(Level::Level1, va, kern_offset)) - .and_then(|t2| t2.next_mut(Level::Level2, va, kern_offset)) + .next_mut(Level::Level0, va) + .and_then(|t1| t1.next_mut(Level::Level1, va)) + .and_then(|t2| t2.next_mut(Level::Level2, va)) .and_then(|t3| t3.entry_mut(Level::Level3, va)), PageSize::Page2M => self - .next_mut(Level::Level0, va, kern_offset) - .and_then(|t1| t1.next_mut(Level::Level1, va, kern_offset)) + .next_mut(Level::Level0, va) + .and_then(|t1| t1.next_mut(Level::Level1, va)) .and_then(|t2| t2.entry_mut(Level::Level2, va)), - PageSize::Page1G => self - .next_mut(Level::Level0, va, kern_offset) - .and_then(|t1| t1.entry_mut(Level::Level1, va)), + PageSize::Page1G => { + self.next_mut(Level::Level0, va).and_then(|t1| t1.entry_mut(Level::Level1, va)) + } }; if let Some(old_entry) = old_entry { @@ -335,35 +326,29 @@ impl PageTable { end: PhysAddr, entry: Entry, page_size: PageSize, - kern_offset: usize, ) -> Result<()> { for pa in PhysAddr::step_by_rounded(start, end, page_size.size()) { - self.map_to( - entry.with_phys_addr(pa), - 
pa.to_virt_with_offset(kern_offset), - page_size, - kern_offset, - )?; + self.map_to(entry.with_phys_addr(pa), pa.to_virt(), page_size)?; } Ok(()) } /// Recursively write out the table and all its children - pub fn print_tables(&self, kern_offset: usize) { + pub fn print_tables(&self) { println!("Root va:{:p}", self); - self.print_table_at_level(Level::Level0, kern_offset); + self.print_table_at_level(Level::Level0); } /// Recursively write out the table and all its children - fn print_table_at_level(&self, level: Level, kern_offset: usize) { + fn print_table_at_level(&self, level: Level) { let indent = 2 + level.depth() * 2; for (i, &pte) in self.entries.iter().enumerate() { if pte.valid() { - print_pte(indent, i, pte, kern_offset); + print_pte(indent, i, pte); if pte.table() { - if let Some(child_table) = self.child_table(pte, kern_offset) { - child_table.print_table_at_level(level.next().unwrap(), kern_offset); + if let Some(child_table) = self.child_table(pte) { + child_table.print_table_at_level(level.next().unwrap()); } } } @@ -378,12 +363,12 @@ impl fmt::Debug for PageTable { } /// Helper to print out PTE as part of a table -fn print_pte(indent: usize, i: usize, pte: Entry, kern_offset: usize) { +fn print_pte(indent: usize, i: usize, pte: Entry) { println!( "{:indent$}[{:03}] va:{:#016x} -> pa:({:?}) (pte:{:#016x})", "", i, - pte.virt_page_addr_with_offset(kern_offset), + pte.virt_page_addr(), pte, pte.0, ); @@ -392,14 +377,14 @@ fn print_pte(indent: usize, i: usize, pte: Entry, kern_offset: usize) { pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: PhysAddr) { //use PageFlags as PF; - let text_phys = PhysAddr::from_offset_virt(text_addr(), KZERO); - let etext_phys = PhysAddr::from_offset_virt(etext_addr(), KZERO); - let erodata_phys = PhysAddr::from_offset_virt(erodata_addr(), KZERO); - let ebss_phys = PhysAddr::from_offset_virt(ebss_addr(), KZERO); - let heap_phys = PhysAddr::from_offset_virt(heap_addr(), KZERO); - let 
eheap_phys = PhysAddr::from_offset_virt(eheap_addr(), KZERO); - let early_pagetables_phys = PhysAddr::from_offset_virt(early_pagetables_addr(), KZERO); - let eearly_pagetables_phys = PhysAddr::from_offset_virt(eearly_pagetables_addr(), KZERO); + let text_phys = PhysAddr::from_virt(text_addr()); + let etext_phys = PhysAddr::from_virt(etext_addr()); + let erodata_phys = PhysAddr::from_virt(erodata_addr()); + let ebss_phys = PhysAddr::from_virt(ebss_addr()); + let heap_phys = PhysAddr::from_virt(heap_addr()); + let eheap_phys = PhysAddr::from_virt(eheap_addr()); + let early_pagetables_phys = PhysAddr::from_virt(early_pagetables_addr()); + let eearly_pagetables_phys = PhysAddr::from_virt(eearly_pagetables_addr()); let mmio = rpi_mmio().expect("mmio base detect failed"); let mmio_end = PhysAddr::from(mmio + (2 * PAGE_SIZE_2M as u64)); @@ -427,9 +412,7 @@ pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: P ]; for (start, end, flags, page_size) in custom_map.iter() { - kpage_table - .map_phys_range(*start, *end, *flags, *page_size, KZERO) - .expect("init mapping failed"); + kpage_table.map_phys_range(*start, *end, *flags, *page_size).expect("init mapping failed"); } } @@ -445,7 +428,7 @@ fn ttbr1_el1() -> u64 { pub unsafe fn switch(kpage_table: &PageTable) { #[cfg(not(test))] unsafe { - let pt_phys = PhysAddr::from_offset_ptr(kpage_table, KZERO).addr(); + let pt_phys = PhysAddr::from_ptr(kpage_table).addr(); core::arch::asm!( "msr ttbr1_el1, {pt_phys}", "dsb ish", @@ -458,6 +441,6 @@ pub unsafe fn switch(kpage_table: &PageTable) { pub fn kernel_root() -> &'static mut PageTable { unsafe { let ttbr1_el1 = ttbr1_el1(); - &mut *PhysAddr::new(ttbr1_el1).to_ptr_mut_with_offset::(KZERO) + &mut *PhysAddr::new(ttbr1_el1).to_ptr_mut::() } } From c03b9deac9e4d653e1b6f1bd4daff73509964df5 Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sat, 16 Sep 2023 21:20:06 +0100 Subject: [PATCH 04/12] Fix test Signed-off-by: Graham MacDonald ---
aarch64/src/vm.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index d861db0..80ebb72 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -418,13 +418,19 @@ pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: P /// Return the root kernel page table physical address fn ttbr1_el1() -> u64 { - let mut addr: u64; - unsafe { - core::arch::asm!("mrs {value}, ttbr1_el1", value = out(reg) addr); + #[cfg(not(test))] + { + let mut addr: u64; + unsafe { + core::arch::asm!("mrs {value}, ttbr1_el1", value = out(reg) addr); + } + addr } - addr + #[cfg(test)] + 0 } +#[allow(unused_variables)] pub unsafe fn switch(kpage_table: &PageTable) { #[cfg(not(test))] unsafe { From 0cac7244098634ec6971f4a2521e71f16d2cffd7 Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sun, 17 Sep 2023 12:52:17 +0100 Subject: [PATCH 05/12] Comment Signed-off-by: Graham MacDonald --- aarch64/lib/kernel.ld | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aarch64/lib/kernel.ld b/aarch64/lib/kernel.ld index 178f3b6..359c8c3 100644 --- a/aarch64/lib/kernel.ld +++ b/aarch64/lib/kernel.ld @@ -60,7 +60,8 @@ SECTIONS { /* Reserve section for early pagetables. Align to 2MiB to allow us to map as a 2MiB page.Note that this won't be needed once we transition to - recursive pagetables. */ + recursive pagetables. + Note this can go when we use recursive pagetables */ . = ALIGN(2 * 1024 * 1024); early_pagetables = .; . 
+= 2 * 1024 * 1024; From 02e3082b373639efc0804bcef7d26a236cbe13ca Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sun, 17 Sep 2023 13:36:57 +0100 Subject: [PATCH 06/12] Use errors instead of options Signed-off-by: Graham MacDonald --- aarch64/src/kalloc.rs | 13 +++++++---- aarch64/src/main.rs | 2 -- aarch64/src/vm.rs | 50 +++++++++++++++++++++++++++---------------- 3 files changed, 41 insertions(+), 24 deletions(-) diff --git a/aarch64/src/kalloc.rs b/aarch64/src/kalloc.rs index 304a006..ff50101 100644 --- a/aarch64/src/kalloc.rs +++ b/aarch64/src/kalloc.rs @@ -10,6 +10,11 @@ struct FreeList { } unsafe impl Send for FreeList {} +#[derive(Debug)] +pub enum Error { + NoFreeBlocks, +} + impl FreeList { pub fn put(&mut self, page: &mut Page4K) { let ptr = (page as *mut Page4K).addr(); @@ -22,13 +27,13 @@ impl FreeList { self.next = ptr::NonNull::new(f); } - pub fn get(&mut self) -> Option<&'static mut Page4K> { - let mut next = self.next?; + pub fn get(&mut self) -> Result<&'static mut Page4K, Error> { + let mut next = self.next.ok_or(Error::NoFreeBlocks)?; let next = unsafe { next.as_mut() }; self.next = next.next; let pg = unsafe { &mut *(next as *mut FreeList as *mut Page4K) }; pg.clear(); - Some(pg) + Ok(pg) } } @@ -41,7 +46,7 @@ pub unsafe fn free_pages(pages: &mut [Page4K]) { } } -pub fn alloc() -> Option<&'static mut Page4K> { +pub fn alloc() -> Result<&'static mut Page4K, Error> { static mut NODE: LockNode = LockNode::new(); let mut lock = FREE_LIST.lock(unsafe { &NODE }); let fl = &mut *lock; diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs index 862f70f..cbc3590 100644 --- a/aarch64/src/main.rs +++ b/aarch64/src/main.rs @@ -31,8 +31,6 @@ use vm::PageTable; #[cfg(not(test))] core::arch::global_asm!(include_str!("l.S")); -type Result = core::result::Result; - static mut KPGTBL: PageTable = PageTable::empty(); unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_void) { diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs 
index 80ebb72..631fd19 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -7,7 +7,6 @@ use crate::{ etext_addr, heap_addr, text_addr, PhysAddr, }, registers::rpi_mmio, - Result, }; use bitstruct::bitstruct; use core::fmt; @@ -17,6 +16,17 @@ use num_enum::{FromPrimitive, IntoPrimitive}; #[cfg(not(test))] use port::println; +#[derive(Debug)] +pub enum VmError { + AllocationFailed(kalloc::Error), +} + +impl From for VmError { + fn from(err: kalloc::Error) -> VmError { + VmError::AllocationFailed(err) + } +} + pub const PAGE_SIZE_4K: usize = 4 * 1024; pub const PAGE_SIZE_2M: usize = 2 * 1024 * 1024; pub const PAGE_SIZE_1G: usize = 1 * 1024 * 1024 * 1024; @@ -248,8 +258,8 @@ impl Table { } } - pub fn entry_mut(&mut self, level: Level, va: usize) -> Option<&mut Entry> { - Some(&mut self.entries[Self::index(level, va)]) + pub fn entry_mut(&mut self, level: Level, va: usize) -> Result<&mut Entry, VmError> { + Ok(&mut self.entries[Self::index(level, va)]) } fn child_table(&self, entry: Entry) -> Option<&Table> { @@ -261,12 +271,14 @@ impl Table { } fn next(&self, level: Level, va: usize) -> Option<&Table> { - let idx = Self::index(level, va); - let entry = self.entries[idx]; + let index = Self::index(level, va); + let entry = self.entries[index]; self.child_table(entry) } - fn next_mut(&mut self, level: Level, va: usize) -> Option<&mut Table> { + // TODO return Result + fn next_mut(&mut self, level: Level, va: usize) -> Result<&mut Table, VmError> { + // Try to get a valid page table entry. If it doesn't exist, create it. let index = Self::index(level, va); let mut entry = self.entries[index]; // println!("next_mut(level:{:?}, va:{:016x}, index:{}): entry:{:?}", level, va, index, entry); @@ -278,9 +290,13 @@ impl Table { write_volatile(&mut self.entries[index], entry); } } + + // TODO Check that the entry is a table + + // Return the address of the next table, as found in the entry. 
let raw_ptr = entry.virt_page_addr(); let next_table = unsafe { &mut *(raw_ptr as *mut Table) }; - Some(next_table) + Ok(next_table) } } @@ -291,7 +307,7 @@ impl PageTable { PageTable { entries: [Entry::empty(); 512] } } - pub fn map_to(&mut self, entry: Entry, va: usize, page_size: PageSize) -> Result<()> { + pub fn map_to(&mut self, entry: Entry, va: usize, page_size: PageSize) -> Result<(), VmError> { // println!("map_to(entry: {:?}, va: {:#x}, page_size {:?})", entry, va, page_size); let old_entry = match page_size { PageSize::Page4K => self @@ -308,16 +324,14 @@ impl PageTable { } }; - if let Some(old_entry) = old_entry { - let entry = entry.with_valid(true); - // println!("Some {:?}, New {:?}", old_entry, entry); - // println!("{:p}", old_entry); - unsafe { - write_volatile(old_entry, entry); - } - return Ok(()); + let old_entry = old_entry?; + let entry = entry.with_valid(true); + // println!("Some {:?}, New {:?}", old_entry, entry); + // println!("{:p}", old_entry); + unsafe { + write_volatile(old_entry, entry); } - Err("Allocation failed") + return Ok(()); } pub fn map_phys_range( @@ -326,7 +340,7 @@ impl PageTable { end: PhysAddr, entry: Entry, page_size: PageSize, - ) -> Result<()> { + ) -> Result<(), VmError> { for pa in PhysAddr::step_by_rounded(start, end, page_size.size()) { self.map_to(entry.with_phys_addr(pa), pa.to_virt(), page_size)?; } From a8e29d41c9c7b8d674e2c9cf5e5b2bec27baa459 Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sun, 17 Sep 2023 14:12:41 +0100 Subject: [PATCH 07/12] More specific error Signed-off-by: Graham MacDonald --- aarch64/src/vm.rs | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index 631fd19..6633632 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -16,17 +16,6 @@ use num_enum::{FromPrimitive, IntoPrimitive}; #[cfg(not(test))] use port::println; -#[derive(Debug)] -pub enum VmError { - 
AllocationFailed(kalloc::Error), -} - -impl From for VmError { - fn from(err: kalloc::Error) -> VmError { - VmError::AllocationFailed(err) - } -} - pub const PAGE_SIZE_4K: usize = 4 * 1024; pub const PAGE_SIZE_2M: usize = 2 * 1024 * 1024; pub const PAGE_SIZE_1G: usize = 1 * 1024 * 1024 * 1024; @@ -243,6 +232,18 @@ impl Level { } } +#[derive(Debug)] +pub enum PageTableError { + AllocationFailed(kalloc::Error), + EntryIsNotTable, +} + +impl From for PageTableError { + fn from(err: kalloc::Error) -> PageTableError { + PageTableError::AllocationFailed(err) + } +} + #[repr(C, align(4096))] pub struct Table { entries: [Entry; 512], @@ -258,7 +259,7 @@ impl Table { } } - pub fn entry_mut(&mut self, level: Level, va: usize) -> Result<&mut Entry, VmError> { + pub fn entry_mut(&mut self, level: Level, va: usize) -> Result<&mut Entry, PageTableError> { Ok(&mut self.entries[Self::index(level, va)]) } @@ -277,7 +278,7 @@ impl Table { } // TODO return Result - fn next_mut(&mut self, level: Level, va: usize) -> Result<&mut Table, VmError> { + fn next_mut(&mut self, level: Level, va: usize) -> Result<&mut Table, PageTableError> { // Try to get a valid page table entry. If it doesn't exist, create it. let index = Self::index(level, va); let mut entry = self.entries[index]; @@ -291,7 +292,9 @@ impl Table { } } - // TODO Check that the entry is a table + if !entry.table() { + return Err(PageTableError::EntryIsNotTable); + } // Return the address of the next table, as found in the entry. 
let raw_ptr = entry.virt_page_addr(); @@ -307,7 +310,12 @@ impl PageTable { PageTable { entries: [Entry::empty(); 512] } } - pub fn map_to(&mut self, entry: Entry, va: usize, page_size: PageSize) -> Result<(), VmError> { + pub fn map_to( + &mut self, + entry: Entry, + va: usize, + page_size: PageSize, + ) -> Result<(), PageTableError> { // println!("map_to(entry: {:?}, va: {:#x}, page_size {:?})", entry, va, page_size); let old_entry = match page_size { PageSize::Page4K => self @@ -340,7 +348,7 @@ impl PageTable { end: PhysAddr, entry: Entry, page_size: PageSize, - ) -> Result<(), VmError> { + ) -> Result<(), PageTableError> { for pa in PhysAddr::step_by_rounded(start, end, page_size.size()) { self.map_to(entry.with_phys_addr(pa), pa.to_virt(), page_size)?; } From 0c6c07cab00b8eea90ddf09f8e8674ed8bce8e8e Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sun, 17 Sep 2023 14:15:48 +0100 Subject: [PATCH 08/12] Tidy Signed-off-by: Graham MacDonald --- aarch64/src/registers.rs | 75 +--------------------------------------- aarch64/src/vm.rs | 2 ++ 2 files changed, 3 insertions(+), 74 deletions(-) diff --git a/aarch64/src/registers.rs b/aarch64/src/registers.rs index 5919510..fa7152d 100644 --- a/aarch64/src/registers.rs +++ b/aarch64/src/registers.rs @@ -179,6 +179,7 @@ bitstruct! { } } +#[allow(dead_code)] impl EsrEl1IssInstructionAbort { pub fn from_esr_el1(r: EsrEl1) -> Option { r.exception_class_enum() @@ -235,36 +236,6 @@ pub enum InstructionFaultStatusCode { UnsupportedAtomicHardwareUpdateFault = 49, } -bitstruct! { - #[derive(Copy, Clone)] - pub struct Vaddr4K4K(pub u64) { - offset: u16 = 0..12; - l4idx: u16 = 12..21; - l3idx: u16 = 21..30; - l2idx: u16 = 30..39; - l1idx: u16 = 39..48; - } -} - -bitstruct! { - #[derive(Copy, Clone)] - pub struct Vaddr4K2M(pub u64) { - offset: u32 = 0..21; - l3idx: u16 = 21..30; - l2idx: u16 = 30..39; - l1idx: u16 = 39..48; - } -} - -bitstruct! 
{ - #[derive(Copy, Clone)] - pub struct Vaddr4K1G(pub u64) { - offset: u32 = 0..30; - l2idx: u16 = 30..39; - l1idx: u16 = 39..48; - } -} - #[cfg(test)] mod tests { use super::*; @@ -289,48 +260,4 @@ mod tests { InstructionFaultStatusCode::TranslationFaultLevel0 ); } - - #[test] - fn breakdown_vadder() { - let va = Vaddr4K4K(0xffff_8000_0000_0000); - assert_eq!(va.l1idx(), 256); - assert_eq!(va.l2idx(), 0); - assert_eq!(va.l3idx(), 0); - assert_eq!(va.l4idx(), 0); - assert_eq!(va.offset(), 0); - - let va = Vaddr4K4K(0x0000_0000_0008_00a8); - assert_eq!(va.l1idx(), 0); - assert_eq!(va.l2idx(), 0); - assert_eq!(va.l3idx(), 0); - assert_eq!(va.l4idx(), 128); - assert_eq!(va.offset(), 168); - - let va = Vaddr4K2M(0xffff_8000_3f00_0000); - assert_eq!(va.l1idx(), 256); - assert_eq!(va.l2idx(), 0); - assert_eq!(va.l3idx(), 504); - assert_eq!(va.offset(), 0); - - let va = Vaddr4K2M(0xffff_8000_fe00_0000); - assert_eq!(va.l1idx(), 256); - assert_eq!(va.l2idx(), 3); - assert_eq!(va.l3idx(), 496); - assert_eq!(va.offset(), 0); - - let va = Vaddr4K1G(0xffff_8000_0000_0000); - assert_eq!(va.l1idx(), 256); - assert_eq!(va.l2idx(), 0); - assert_eq!(va.offset(), 0); - - let va = Vaddr4K1G(0x0000_0000_0008_00a8); - assert_eq!(va.l1idx(), 0); - assert_eq!(va.l2idx(), 0); - assert_eq!(va.offset(), 524456); - - let va = Vaddr4K1G(0xffff_8000_0010_00c8); - assert_eq!(va.l1idx(), 256); - assert_eq!(va.l2idx(), 0); - assert_eq!(va.offset(), 0x1000c8); - } } diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index 6633632..f9ba865 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -20,6 +20,7 @@ pub const PAGE_SIZE_4K: usize = 4 * 1024; pub const PAGE_SIZE_2M: usize = 2 * 1024 * 1024; pub const PAGE_SIZE_1G: usize = 1 * 1024 * 1024 * 1024; +#[allow(dead_code)] #[derive(Debug, Clone, Copy)] pub enum PageSize { Page4K, @@ -271,6 +272,7 @@ impl Table { Some(unsafe { &*(raw_ptr as *const Table) }) } + #[allow(dead_code)] fn next(&self, level: Level, va: usize) -> Option<&Table> { let 
index = Self::index(level, va); let entry = self.entries[index]; From 8685e4b31e318276ba024a5b8949730f24a8daab Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Wed, 27 Sep 2023 08:18:32 +0100 Subject: [PATCH 09/12] Support recursive page tables Signed-off-by: Graham MacDonald --- Cargo.lock | 8 +-- aarch64/lib/kernel.ld | 2 +- aarch64/src/kmem.rs | 15 ++++- aarch64/src/l.S | 20 ++++-- aarch64/src/main.rs | 27 +++++---- aarch64/src/vm.rs | 138 +++++++++++++++++++++++++----------------- port/src/fdt.rs | 4 +- xtask/Cargo.toml | 2 +- 8 files changed, 132 insertions(+), 84 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9aa477..d1d39c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -304,9 +304,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.6" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +checksum = "c226a7bba6d859b63c92c4b4fe69c5b6b72d0cb897dbc8e6012298e6154cb56e" dependencies = [ "serde", "serde_spanned", @@ -325,9 +325,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "8ff63e60a958cefbb518ae1fd6566af80d9d4be430a33f3723dfc47d1d411d95" dependencies = [ "indexmap", "serde", diff --git a/aarch64/lib/kernel.ld b/aarch64/lib/kernel.ld index 359c8c3..855fc00 100644 --- a/aarch64/lib/kernel.ld +++ b/aarch64/lib/kernel.ld @@ -61,7 +61,7 @@ SECTIONS { /* Reserve section for early pagetables. Align to 2MiB to allow us to map as a 2MiB page.Note that this won't be needed once we transition to recursive pagetables. - Note this can go when we use recursive pagetables */ + TODO Just use the heap when we enable recursive pagetables? */ . = ALIGN(2 * 1024 * 1024); early_pagetables = .; . 
+= 2 * 1024 * 1024; diff --git a/aarch64/src/kmem.rs b/aarch64/src/kmem.rs index 4741ca2..060fc12 100644 --- a/aarch64/src/kmem.rs +++ b/aarch64/src/kmem.rs @@ -156,7 +156,7 @@ pub fn early_pages() -> &'static mut [Page4K] { #[cfg(test)] mod tests { use super::*; - use crate::vm; + use crate::vm::{self, va_index, Level}; #[test] fn physaddr_step() { @@ -184,4 +184,17 @@ mod tests { PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_2M).collect::>(); assert_eq!(pas, [PhysAddr::new(0x3f000000), PhysAddr::new(0x3f000000 + 2 * 1024 * 1024)]); } + + #[test] + fn can_break_down_va() { + let va: usize = 0xffff8000049fd000; + let va_parts = ( + va_index(va, Level::Level0), + va_index(va, Level::Level1), + va_index(va, Level::Level2), + va_index(va, Level::Level3), + ); + let expected_parts = (256, 0, 36, 509); + assert_eq!(va_parts, expected_parts); + } } diff --git a/aarch64/src/l.S b/aarch64/src/l.S index badc5ea..708f243 100644 --- a/aarch64/src/l.S +++ b/aarch64/src/l.S @@ -76,7 +76,6 @@ PT_ISH = (3<<8) // Inner shareable (shared across CPUs) KZERO = 0xffff800000000000 MiB = (1<<20) GiB = (1<<30) -KTZERO = (KZERO + 2*MiB) // Virtual base of kernel text // Constants for early uart setup MMIO_BASE_RPI3 = 0x3f000000 @@ -629,37 +628,46 @@ dnr: wfe // that the aarch64 setup code in l.S is solid, we should disable the uart code // and perhaps have something that can be enabled manually for dev purposes only // in the future. + +// One final note is that we've set up recursive page tables here. This is to +// allow us to use the vm code, which assumes recursive pagetables, e.g. for +// dumping out the page tables. 
.balign 4096 kernelpt4: .space (256*8) .quad (kernelpt3 - KZERO) + (PT_PAGE) // [256] (for kernel + mmio) - .space (255*8) + .space (254*8) + .quad (kernelpt4 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry) .balign 4096 kernelpt3: .quad (0*2*GiB) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_MAIR_NORMAL) // [0] (for kernel) .space (2*8) .quad (kernelpt2 - KZERO) + (PT_PAGE) // [3] (for mmio) - .space (508*8) + .space (507*8) + .quad (kernelpt3 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry) .balign 4096 kernelpt2: .space (496*8) .quad (MMIO_BASE_RPI4) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_PXN|PT_MAIR_DEVICE) // [496] (for mmio) .quad (MMIO_BASE_RPI4 + GPIO) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_PXN|PT_MAIR_DEVICE) // [497] (for mmio) - .space (14*8) + .space (13*8) + .quad (kernelpt2 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry) // Early page tables for identity mapping the kernel physical addresses. // Once we've jumped to the higher half, this will no longer be used. 
.balign 4096 physicalpt4: .quad (physicalpt3 - KZERO) + (PT_PAGE) // [0] (for kernel) - .space (511*8) + .space (510*8) + .quad (physicalpt4 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry) .balign 4096 physicalpt3: .quad (0*2*GiB) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_MAIR_NORMAL) // [0] (for kernel) - .space (511*8) + .space (510*8) + .quad (physicalpt3 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry) .bss .balign 4096 diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs index cbc3590..49dedd6 100644 --- a/aarch64/src/main.rs +++ b/aarch64/src/main.rs @@ -100,21 +100,13 @@ fn print_board_info() { } #[no_mangle] -pub extern "C" fn main9(dtb_ptr: u64) { +pub extern "C" fn main9(dtb_ptr: usize) { trap::init(); // Parse the DTB before we set up memory so we can correctly map it - let dt = unsafe { DeviceTree::from_u64(dtb_ptr).unwrap() }; - - unsafe { - kalloc::free_pages(kmem::early_pages()); - - let dtb_phys = PhysAddr::new(dtb_ptr); - let edtb_phys = dtb_phys + dt.size() as u64; - vm::init(&mut KPGTBL, dtb_phys, edtb_phys); - vm::switch(&KPGTBL); - } + let dt = unsafe { DeviceTree::from_usize(dtb_ptr).unwrap() }; + // Set up uart so we can log as early as possible mailbox::init(&dt); devcons::init(&dt); @@ -123,12 +115,21 @@ pub extern "C" fn main9(dtb_ptr: u64) { println!("DTB found at: {:#x}", dtb_ptr); println!("midr_el1: {:?}", registers::MidrEl1::read()); + // Map address space accurately using rust VM code to manage page tables + unsafe { + kalloc::free_pages(kmem::early_pages()); + + let dtb_phys = PhysAddr::from_virt(dtb_ptr as usize); + let edtb_phys = dtb_phys + dt.size() as u64; + vm::init(&mut KPGTBL, dtb_phys, edtb_phys); + vm::switch(&KPGTBL); + } + print_binary_sections(); print_physical_memory_map(); print_board_info(); - // Dump out pagetables - kernel_root().print_tables(); + kernel_root().print_recursive_tables(); println!("looping now"); diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index f9ba865..4feb6bb 100644 
--- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -2,10 +2,7 @@ use crate::{ kalloc, - kmem::{ - early_pagetables_addr, ebss_addr, eearly_pagetables_addr, eheap_addr, erodata_addr, - etext_addr, heap_addr, text_addr, PhysAddr, - }, + kmem::{ebss_addr, eheap_addr, erodata_addr, etext_addr, heap_addr, text_addr, PhysAddr}, registers::rpi_mmio, }; use bitstruct::bitstruct; @@ -107,10 +104,6 @@ bitstruct! { } impl Entry { - fn new(pa: PhysAddr) -> Self { - Entry(0).with_addr(pa.addr() >> 12) - } - pub const fn empty() -> Entry { Entry(0) } @@ -204,7 +197,7 @@ impl fmt::Debug for Entry { /// there. Four levels would support (for example) 4kiB granules with 4KiB /// pages using Level0 - Level3, while three would support 2MiB pages with the /// same size granules, using only Level0 - Level2. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub enum Level { Level0, Level1, @@ -233,6 +226,15 @@ impl Level { } } +pub fn va_index(va: usize, level: Level) -> usize { + match level { + Level::Level0 => (va >> 39) & 0x1ff, + Level::Level1 => (va >> 30) & 0x1ff, + Level::Level2 => (va >> 21) & 0x1ff, + Level::Level3 => (va >> 12) & 0x1ff, + } +} + #[derive(Debug)] pub enum PageTableError { AllocationFailed(kalloc::Error), @@ -251,19 +253,11 @@ pub struct Table { } impl Table { - fn index(level: Level, va: usize) -> usize { - match level { - Level::Level0 => (va >> 39) & 0x1FF, - Level::Level1 => (va >> 30) & 0x1FF, - Level::Level2 => (va >> 21) & 0x1FF, - Level::Level3 => (va >> 12) & 0x1FF, - } - } - pub fn entry_mut(&mut self, level: Level, va: usize) -> Result<&mut Entry, PageTableError> { - Ok(&mut self.entries[Self::index(level, va)]) + Ok(&mut self.entries[va_index(va, level)]) } + // TODO remove? 
fn child_table(&self, entry: Entry) -> Option<&Table> { if !entry.valid() { return None; @@ -274,23 +268,28 @@ impl Table { #[allow(dead_code)] fn next(&self, level: Level, va: usize) -> Option<&Table> { - let index = Self::index(level, va); + let index = va_index(va, level); let entry = self.entries[index]; self.child_table(entry) } - // TODO return Result fn next_mut(&mut self, level: Level, va: usize) -> Result<&mut Table, PageTableError> { // Try to get a valid page table entry. If it doesn't exist, create it. - let index = Self::index(level, va); + let index = va_index(va, level); let mut entry = self.entries[index]; - // println!("next_mut(level:{:?}, va:{:016x}, index:{}): entry:{:?}", level, va, index, entry); if !entry.valid() { - let page = kalloc::alloc()?; - page.clear(); - entry = Entry::new(PhysAddr::from_ptr(page)).with_valid(true).with_table(true); + // Create a new recursive page table + let table = Self::alloc_pagetable()?; + entry = Entry::rw_kernel_data() + .with_phys_addr(PhysAddr::from_ptr(table)) + .with_valid(true) + .with_table(true); unsafe { write_volatile(&mut self.entries[index], entry); + // Write the recursive entry. Note that every recursive entry + // must have the 'accessed' flag set, which we do in setting up + // the entry above. 
+ write_volatile(&mut table.entries[511], entry); } } @@ -303,6 +302,12 @@ impl Table { let next_table = unsafe { &mut *(raw_ptr as *mut Table) }; Ok(next_table) } + + fn alloc_pagetable() -> Result<&'static mut Table, PageTableError> { + let page = kalloc::alloc()?; + page.clear(); + Ok(unsafe { &mut *(page as *mut Page4K as *mut Table) }) + } } pub type PageTable = Table; @@ -318,7 +323,6 @@ impl PageTable { va: usize, page_size: PageSize, ) -> Result<(), PageTableError> { - // println!("map_to(entry: {:?}, va: {:#x}, page_size {:?})", entry, va, page_size); let old_entry = match page_size { PageSize::Page4K => self .next_mut(Level::Level0, va) @@ -336,14 +340,19 @@ impl PageTable { let old_entry = old_entry?; let entry = entry.with_valid(true); - // println!("Some {:?}, New {:?}", old_entry, entry); - // println!("{:p}", old_entry); unsafe { write_volatile(old_entry, entry); } return Ok(()); } + /// Map the physical range using the requested page size. + /// This aligns on page size boundaries, and rounds the requested range so + /// that both the alignment requirements are met and the requested range are + /// covered. + /// TODO Assuming some of these requests are dynamic, but should not fail, + /// we should fall back to the smaller page sizes if the requested size + /// fails. 
pub fn map_phys_range( &mut self, start: PhysAddr, @@ -358,22 +367,25 @@ impl PageTable { } /// Recursively write out the table and all its children - pub fn print_tables(&self) { - println!("Root va:{:p}", self); - self.print_table_at_level(Level::Level0); + pub fn print_recursive_tables(&self) { + println!("Root va:{:p}", self); + self.print_table_at_level(Level::Level0, 0xffff_ffff_ffff_f000); } /// Recursively write out the table and all its children - fn print_table_at_level(&self, level: Level) { + fn print_table_at_level(&self, level: Level, table_va: usize) { let indent = 2 + level.depth() * 2; + println!("{:indent$}Table {:?} va:{:p}", "", level, self); for (i, &pte) in self.entries.iter().enumerate() { if pte.valid() { print_pte(indent, i, pte); - if pte.table() { - if let Some(child_table) = self.child_table(pte) { - child_table.print_table_at_level(level.next().unwrap()); - } + // Recurse into child table (unless it's the recursive index) + if i != 511 && pte.table() { + let next_nevel = level.next().unwrap(); + let child_va = (table_va << 9) | (i << 12); + let child_table = unsafe { &*(child_va as *const PageTable) }; + child_table.print_table_at_level(next_nevel, child_va); } } } @@ -388,18 +400,29 @@ impl fmt::Debug for PageTable { /// Helper to print out PTE as part of a table fn print_pte(indent: usize, i: usize, pte: Entry) { - println!( - "{:indent$}[{:03}] va:{:#016x} -> pa:({:?}) (pte:{:#016x})", - "", - i, - pte.virt_page_addr(), - pte, - pte.0, - ); + if pte.table() { + println!("{:indent$}[{:03}] Table {:?} (pte:{:#016x})", "", i, pte, pte.0,); + } else { + println!( + "{:indent$}[{:03}] Entry va:{:#018x} -> {:?} (pte:{:#016x})", + "", + i, + pte.virt_page_addr(), + pte, + pte.0, + ); + } } pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: PhysAddr) { - //use PageFlags as PF; + // Write the recursive entry + unsafe { + let entry = Entry::rw_kernel_data() + .with_phys_addr(PhysAddr::from_ptr(kpage_table)) + 
.with_valid(true) + .with_table(true); + write_volatile(&mut kpage_table.entries[511], entry); + } let text_phys = PhysAddr::from_virt(text_addr()); let etext_phys = PhysAddr::from_virt(etext_addr()); @@ -407,8 +430,6 @@ pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: P let ebss_phys = PhysAddr::from_virt(ebss_addr()); let heap_phys = PhysAddr::from_virt(heap_addr()); let eheap_phys = PhysAddr::from_virt(eheap_addr()); - let early_pagetables_phys = PhysAddr::from_virt(early_pagetables_addr()); - let eearly_pagetables_phys = PhysAddr::from_virt(eearly_pagetables_addr()); let mmio = rpi_mmio().expect("mmio base detect failed"); let mmio_end = PhysAddr::from(mmio + (2 * PAGE_SIZE_2M as u64)); @@ -429,13 +450,18 @@ pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: P (erodata_phys, ebss_phys, Entry::rw_kernel_data(), PageSize::Page2M), // Kernel heap (heap_phys, eheap_phys, Entry::rw_kernel_data(), PageSize::Page2M), - // Page tables - (early_pagetables_phys, eearly_pagetables_phys, Entry::rw_kernel_data(), PageSize::Page2M), // MMIO (mmio, mmio_end, Entry::ro_kernel_device(), PageSize::Page2M), ]; for (start, end, flags, page_size) in custom_map.iter() { + println!( + "Mapping {:#018x} - {:#018x} flags: {:?} page_size: {:?}", + start.addr(), + end.addr(), + flags, + page_size + ); kpage_table.map_phys_range(*start, *end, *flags, *page_size).expect("init mapping failed"); } } @@ -459,18 +485,18 @@ pub unsafe fn switch(kpage_table: &PageTable) { #[cfg(not(test))] unsafe { let pt_phys = PhysAddr::from_ptr(kpage_table).addr(); + // https://forum.osdev.org/viewtopic.php?t=36412&p=303237 core::arch::asm!( "msr ttbr1_el1, {pt_phys}", - "dsb ish", - "isb", + "tlbi vmalle1", // invalidate all TLB entries + "dsb ish", // ensure write has completed + "isb", // synchronize context and ensure that no instructions + // are fetched using the old translation pt_phys = in(reg) pt_phys); } } /// Return the root kernel page 
table pub fn kernel_root() -> &'static mut PageTable { - unsafe { - let ttbr1_el1 = ttbr1_el1(); - &mut *PhysAddr::new(ttbr1_el1).to_ptr_mut::() - } + unsafe { &mut *PhysAddr::new(ttbr1_el1()).to_ptr_mut::() } } diff --git a/port/src/fdt.rs b/port/src/fdt.rs index e4f7d77..189e146 100644 --- a/port/src/fdt.rs +++ b/port/src/fdt.rs @@ -67,8 +67,8 @@ impl<'a> DeviceTree<'a> { self.header.totalsize as usize } - /// Given a pointer to the dtb as a u64, return a DeviceTree struct. - pub unsafe fn from_u64(ptr: u64) -> Result { + /// Given a pointer to the dtb as a usize, return a DeviceTree struct. + pub unsafe fn from_usize(ptr: usize) -> Result { let u8ptr = ptr as *const mem::MaybeUninit; // Extract the real length from the header diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index bef277e..3439d13 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -8,4 +8,4 @@ edition = "2021" [dependencies] clap = { version = "4.2.4", features = ["derive"] } serde = { version = "1.0.160", features = ["derive"] } -toml = "0.7.3" +toml = "0.8.0" From 64be61d56d75f9a814e4a8edcf84c35ca98000bc Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Tue, 10 Oct 2023 21:59:33 +0100 Subject: [PATCH 10/12] Use recursive page tables Signed-off-by: Graham MacDonald --- aarch64/src/kmem.rs | 17 +---- aarch64/src/vm.rs | 158 ++++++++++++++++++++++++++++++++++---------- 2 files changed, 126 insertions(+), 49 deletions(-) diff --git a/aarch64/src/kmem.rs b/aarch64/src/kmem.rs index 060fc12..4576a32 100644 --- a/aarch64/src/kmem.rs +++ b/aarch64/src/kmem.rs @@ -19,7 +19,7 @@ extern "C" { } pub fn text_addr() -> usize { - 0xFFFF_8000_0000_0000 + 0xffff_8000_0000_0000 } pub fn etext_addr() -> usize { @@ -156,7 +156,7 @@ pub fn early_pages() -> &'static mut [Page4K] { #[cfg(test)] mod tests { use super::*; - use crate::vm::{self, va_index, Level}; + use crate::vm; #[test] fn physaddr_step() { @@ -184,17 +184,4 @@ mod tests { PhysAddr::step_by_rounded(startpa, endpa, 
vm::PAGE_SIZE_2M).collect::>(); assert_eq!(pas, [PhysAddr::new(0x3f000000), PhysAddr::new(0x3f000000 + 2 * 1024 * 1024)]); } - - #[test] - fn can_break_down_va() { - let va: usize = 0xffff8000049fd000; - let va_parts = ( - va_index(va, Level::Level0), - va_index(va, Level::Level1), - va_index(va, Level::Level2), - va_index(va, Level::Level3), - ); - let expected_parts = (256, 0, 36, 509); - assert_eq!(va_parts, expected_parts); - } } diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index 4feb6bb..c13e230 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -115,6 +115,7 @@ impl Entry { .with_uxn(true) .with_pxn(true) .with_mair_index(Mair::Normal) + .with_valid(true) } fn ro_kernel_data() -> Self { @@ -125,6 +126,7 @@ impl Entry { .with_uxn(true) .with_pxn(true) .with_mair_index(Mair::Normal) + .with_valid(true) } fn ro_kernel_text() -> Self { @@ -135,6 +137,7 @@ impl Entry { .with_uxn(true) .with_pxn(false) .with_mair_index(Mair::Normal) + .with_valid(true) } fn ro_kernel_device() -> Self { @@ -145,12 +148,14 @@ impl Entry { .with_uxn(true) .with_pxn(true) .with_mair_index(Mair::Device) + .with_valid(true) } const fn with_phys_addr(self, pa: PhysAddr) -> Self { Entry(self.0).with_addr(pa.addr() >> 12) } + /// Return the physical page address pointed to by this entry fn phys_page_addr(self) -> PhysAddr { PhysAddr::new(self.addr() << 12) } @@ -235,6 +240,34 @@ pub fn va_index(va: usize, level: Level) -> usize { } } +#[cfg(test)] +fn va_indices(va: usize) -> (usize, usize, usize, usize) { + ( + va_index(va, Level::Level0), + va_index(va, Level::Level1), + va_index(va, Level::Level2), + va_index(va, Level::Level3), + ) +} + +fn recursive_table_addr(va: usize, level: Level) -> usize { + let indices_mask = 0x0000_ffff_ffff_f000; + let indices = va & indices_mask; + let shift = match level { + Level::Level0 => 36, + Level::Level1 => 27, + Level::Level2 => 18, + Level::Level3 => 9, + }; + let recursive_indices = match level { + Level::Level0 => (511 << 39) | (511 
<< 30) | (511 << 21) | (511 << 12), + Level::Level1 => (511 << 39) | (511 << 30) | (511 << 21), + Level::Level2 => (511 << 39) | (511 << 30), + Level::Level3 => 511 << 39, + }; + 0xffff_0000_0000_0000 | recursive_indices | ((indices >> shift) & indices_mask) +} + #[derive(Debug)] pub enum PageTableError { AllocationFailed(kalloc::Error), @@ -257,38 +290,21 @@ impl Table { Ok(&mut self.entries[va_index(va, level)]) } - // TODO remove? - fn child_table(&self, entry: Entry) -> Option<&Table> { - if !entry.valid() { - return None; - } - let raw_ptr = entry.virt_page_addr(); - Some(unsafe { &*(raw_ptr as *const Table) }) - } - - #[allow(dead_code)] - fn next(&self, level: Level, va: usize) -> Option<&Table> { - let index = va_index(va, level); - let entry = self.entries[index]; - self.child_table(entry) - } - + /// Return the next table in the walk. If it doesn't exist, create it. fn next_mut(&mut self, level: Level, va: usize) -> Result<&mut Table, PageTableError> { // Try to get a valid page table entry. If it doesn't exist, create it. let index = va_index(va, level); let mut entry = self.entries[index]; if !entry.valid() { - // Create a new recursive page table + // Create a new recursive page table. (Note every recursive entry + // must have the 'accessed' flag set) At this point the address + // doesn't need to be recursive because we just allocated it from + // a mapped area of memory. let table = Self::alloc_pagetable()?; - entry = Entry::rw_kernel_data() - .with_phys_addr(PhysAddr::from_ptr(table)) - .with_valid(true) - .with_table(true); + entry = + Entry::rw_kernel_data().with_phys_addr(PhysAddr::from_ptr(table)).with_table(true); unsafe { write_volatile(&mut self.entries[index], entry); - // Write the recursive entry. Note that every recursive entry - // must have the 'accessed' flag set, which we do in setting up - // the entry above. 
write_volatile(&mut table.entries[511], entry); } } @@ -297,10 +313,9 @@ impl Table { return Err(PageTableError::EntryIsNotTable); } - // Return the address of the next table, as found in the entry. - let raw_ptr = entry.virt_page_addr(); - let next_table = unsafe { &mut *(raw_ptr as *mut Table) }; - Ok(next_table) + // Return the address of the next table as a recursive address + let recursive_page_addr = recursive_table_addr(va, level.next().unwrap()); + Ok(unsafe { &mut *(recursive_page_addr as *mut Table) }) } fn alloc_pagetable() -> Result<&'static mut Table, PageTableError> { @@ -317,13 +332,30 @@ impl PageTable { PageTable { entries: [Entry::empty(); 512] } } - pub fn map_to( + /// Ensure there's a mapping from va to entry, creating any intermediate + /// page tables that don't already exist. If a mapping already exists, + /// replace it. + fn map_to( &mut self, entry: Entry, va: usize, page_size: PageSize, ) -> Result<(), PageTableError> { - let old_entry = match page_size { + // We change the last entry of the root page table to the address of + // self for the duration of this method. This allows us to work with + // this hierarchy of pagetables even if it's not the current translation + // table. We *must* return it to its original state on exit. 
+ let old_recursive_entry = kernel_root().entries[511]; + let temp_recursive_entry = + Entry::rw_kernel_data().with_phys_addr(PhysAddr::from_ptr(self)).with_table(true); + + unsafe { + write_volatile(&mut kernel_root().entries[511], temp_recursive_entry); + // TODO Need to invalidate the single cache entry + invalidate_all_tlb_entries(); + }; + + let dest_entry = match page_size { PageSize::Page4K => self .next_mut(Level::Level0, va) .and_then(|t1| t1.next_mut(Level::Level1, va)) @@ -338,11 +370,14 @@ impl PageTable { } }; - let old_entry = old_entry?; - let entry = entry.with_valid(true); unsafe { - write_volatile(old_entry, entry); + write_volatile(dest_entry?, entry); + // Return the recursive entry to its original state + write_volatile(&mut kernel_root().entries[511], old_recursive_entry); + // TODO Need to invalidate the single cache entry + invalidate_all_tlb_entries(); } + return Ok(()); } @@ -415,11 +450,16 @@ fn print_pte(indent: usize, i: usize, pte: Entry) { } pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: PhysAddr) { + // We use recursive page tables, but we have to be careful in the init call, + // since the kpage_table is not currently pointed to by ttbr1_el1. Any + // recursive addressing of (511, 511, 511, 511) always points to the + // physical address of the root page table, which isn't what we want here + // because kpage_table hasn't been switched to yet. 
+ // Write the recursive entry unsafe { let entry = Entry::rw_kernel_data() .with_phys_addr(PhysAddr::from_ptr(kpage_table)) - .with_valid(true) .with_table(true); write_volatile(&mut kpage_table.entries[511], entry); } @@ -496,7 +536,57 @@ pub unsafe fn switch(kpage_table: &PageTable) { } } +#[allow(unused_variables)] +pub unsafe fn invalidate_all_tlb_entries() { + #[cfg(not(test))] + unsafe { + // https://forum.osdev.org/viewtopic.php?t=36412&p=303237 + core::arch::asm!( + "tlbi vmalle1", // invalidate all TLB entries + "dsb ish", // ensure write has completed + "isb" + ); // synchronize context and ensure that no instructions + // are fetched using the old translation + } +} + /// Return the root kernel page table pub fn kernel_root() -> &'static mut PageTable { unsafe { &mut *PhysAddr::new(ttbr1_el1()).to_ptr_mut::() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn can_break_down_va() { + assert_eq!(va_indices(0xffff8000049fd000), (256, 0, 36, 509)); + } + + #[test] + fn test_to_use_for_debugging_vaddrs() { + assert_eq!(va_indices(0xffff8000049fd000), (256, 0, 36, 509)); + } + + #[test] + fn test_recursive_table_addr() { + assert_eq!(va_indices(0xffff800008000000), (256, 0, 64, 0)); + assert_eq!( + va_indices(recursive_table_addr(0xffff800008000000, Level::Level0)), + (511, 511, 511, 511) + ); + assert_eq!( + va_indices(recursive_table_addr(0xffff800008000000, Level::Level1)), + (511, 511, 511, 256) + ); + assert_eq!( + va_indices(recursive_table_addr(0xffff800008000000, Level::Level2)), + (511, 511, 256, 0) + ); + assert_eq!( + va_indices(recursive_table_addr(0xffff800008000000, Level::Level3)), + (511, 256, 0, 64) + ); + } +} From 2fada181530b76520fc8d8cb4337a0fb75901652 Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Tue, 10 Oct 2023 22:02:30 +0100 Subject: [PATCH 11/12] fix riscv build Signed-off-by: Graham MacDonald --- riscv64/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv64/src/main.rs 
b/riscv64/src/main.rs index 9172a2c..03fbc24 100644 --- a/riscv64/src/main.rs +++ b/riscv64/src/main.rs @@ -20,8 +20,8 @@ use port::fdt::DeviceTree; core::arch::global_asm!(include_str!("l.S")); #[no_mangle] -pub extern "C" fn main9(hartid: usize, dtb_ptr: u64) -> ! { - let dt = unsafe { DeviceTree::from_u64(dtb_ptr).unwrap() }; +pub extern "C" fn main9(hartid: usize, dtb_ptr: usize) -> ! { + let dt = unsafe { DeviceTree::from_usize(dtb_ptr).unwrap() }; crate::devcons::init(&dt); platform_init(); From 70fff2b1c9b7f3f2c2163b6c806b2eebd4793ec2 Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Sun, 15 Oct 2023 16:41:05 +0100 Subject: [PATCH 12/12] Better mapping info Signed-off-by: Graham MacDonald --- aarch64/src/vm.rs | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs index c13e230..204c2c6 100644 --- a/aarch64/src/vm.rs +++ b/aarch64/src/vm.rs @@ -272,6 +272,7 @@ fn recursive_table_addr(va: usize, level: Level) -> usize { pub enum PageTableError { AllocationFailed(kalloc::Error), EntryIsNotTable, + PhysRangeIsZero, } impl From for PageTableError { @@ -394,11 +395,16 @@ impl PageTable { end: PhysAddr, entry: Entry, page_size: PageSize, - ) -> Result<(), PageTableError> { + ) -> Result<(usize, usize), PageTableError> { + let mut startva = None; + let mut endva = 0; for pa in PhysAddr::step_by_rounded(start, end, page_size.size()) { - self.map_to(entry.with_phys_addr(pa), pa.to_virt(), page_size)?; + let va = pa.to_virt(); + self.map_to(entry.with_phys_addr(pa), va, page_size)?; + startva.get_or_insert(va); + endva = va + page_size.size(); } - Ok(()) + startva.map(|startva| (startva, endva)).ok_or(PageTableError::PhysRangeIsZero) } /// Recursively write out the table and all its children @@ -479,30 +485,28 @@ pub unsafe fn init(kpage_table: &mut PageTable, dtb_phys: PhysAddr, edtb_phys: P // Note that the first page is left unmapped to try and // catch null pointer 
dereferences in unsafe code: defense // in depth! - - // DTB - (dtb_phys, edtb_phys, Entry::ro_kernel_data(), PageSize::Page4K), - // Kernel text - (text_phys, etext_phys, Entry::ro_kernel_text(), PageSize::Page2M), - // Kernel read-only data - (etext_phys, erodata_phys, Entry::ro_kernel_data(), PageSize::Page2M), - // Kernel BSS - (erodata_phys, ebss_phys, Entry::rw_kernel_data(), PageSize::Page2M), - // Kernel heap - (heap_phys, eheap_phys, Entry::rw_kernel_data(), PageSize::Page2M), - // MMIO - (mmio, mmio_end, Entry::ro_kernel_device(), PageSize::Page2M), + ("DTB", dtb_phys, edtb_phys, Entry::ro_kernel_data(), PageSize::Page4K), + ("Kernel Text", text_phys, etext_phys, Entry::ro_kernel_text(), PageSize::Page2M), + ("Kernel Data", etext_phys, erodata_phys, Entry::ro_kernel_data(), PageSize::Page2M), + ("Kernel BSS", erodata_phys, ebss_phys, Entry::rw_kernel_data(), PageSize::Page2M), + ("Kernel Heap", heap_phys, eheap_phys, Entry::rw_kernel_data(), PageSize::Page2M), + ("MMIO", mmio, mmio_end, Entry::ro_kernel_device(), PageSize::Page2M), ]; - for (start, end, flags, page_size) in custom_map.iter() { + for (name, start, end, flags, page_size) in custom_map.iter() { + let mapped_range = kpage_table + .map_phys_range(*start, *end, *flags, *page_size) + .expect("init mapping failed"); println!( - "Mapping {:#018x} - {:#018x} flags: {:?} page_size: {:?}", + "Mapped {:16} {:#018x}-{:#018x} to {:#018x}-{:#018x} flags: {:?} page_size: {:?}", + name, start.addr(), end.addr(), + mapped_range.0, + mapped_range.1, flags, page_size ); - kpage_table.map_phys_range(*start, *end, *flags, *page_size).expect("init mapping failed"); } }