Skip to content

Commit

Permalink
Support recursive page tables
Browse files Browse the repository at this point in the history
Signed-off-by: Graham MacDonald <[email protected]>
  • Loading branch information
gmacd committed Sep 27, 2023
1 parent 0c6c07c commit 8685e4b
Show file tree
Hide file tree
Showing 8 changed files with 132 additions and 84 deletions.
8 changes: 4 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion aarch64/lib/kernel.ld
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ SECTIONS {
/* Reserve a section for early pagetables. Align to 2MiB so the region can be
mapped as a single 2MiB page. Note that this won't be needed once we
transition to recursive pagetables.
Note this can go when we use recursive pagetables */
TODO Just use the heap when we enable recursive pagetables? */
. = ALIGN(2 * 1024 * 1024);
early_pagetables = .;
. += 2 * 1024 * 1024;
Expand Down
15 changes: 14 additions & 1 deletion aarch64/src/kmem.rs
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ pub fn early_pages() -> &'static mut [Page4K] {
#[cfg(test)]
mod tests {
use super::*;
use crate::vm;
use crate::vm::{self, va_index, Level};

#[test]
fn physaddr_step() {
Expand Down Expand Up @@ -184,4 +184,17 @@ mod tests {
PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_2M).collect::<Vec<PhysAddr>>();
assert_eq!(pas, [PhysAddr::new(0x3f000000), PhysAddr::new(0x3f000000 + 2 * 1024 * 1024)]);
}

#[test]
fn can_break_down_va() {
    // Decompose a kernel-half virtual address into its four translation
    // table indices (one 9-bit index per level) and compare each against
    // manually computed values for 0xffff8000049fd000.
    let va: usize = 0xffff8000049fd000;
    let actual = (
        va_index(va, Level::Level0),
        va_index(va, Level::Level1),
        va_index(va, Level::Level2),
        va_index(va, Level::Level3),
    );
    // L0 = bits 47:39, L1 = bits 38:30, L2 = bits 29:21, L3 = bits 20:12.
    assert_eq!(actual, (256, 0, 36, 509));
}
}
20 changes: 14 additions & 6 deletions aarch64/src/l.S
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,6 @@ PT_ISH = (3<<8) // Inner shareable (shared across CPUs)
KZERO = 0xffff800000000000
MiB = (1<<20)
GiB = (1<<30)
KTZERO = (KZERO + 2*MiB) // Virtual base of kernel text

// Constants for early uart setup
MMIO_BASE_RPI3 = 0x3f000000
Expand Down Expand Up @@ -629,37 +628,46 @@ dnr: wfe
// that the aarch64 setup code in l.S is solid, we should disable the uart code
// and perhaps have something that can be enabled manually for dev purposes only
// in the future.

// One final note is that we've set up recursive page tables here. This is to
// allow us to use the vm code, which assumes recursive pagetables, e.g. for
// dumping out the page tables.
.balign 4096
kernelpt4:
.space (256*8)
.quad (kernelpt3 - KZERO) + (PT_PAGE) // [256] (for kernel + mmio)
.space (255*8)
.space (254*8)
.quad (kernelpt4 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry)

.balign 4096
kernelpt3:
.quad (0*2*GiB) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_MAIR_NORMAL) // [0] (for kernel)
.space (2*8)
.quad (kernelpt2 - KZERO) + (PT_PAGE) // [3] (for mmio)
.space (508*8)
.space (507*8)
.quad (kernelpt3 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry)

.balign 4096
kernelpt2:
.space (496*8)
.quad (MMIO_BASE_RPI4) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_PXN|PT_MAIR_DEVICE) // [496] (for mmio)
.quad (MMIO_BASE_RPI4 + GPIO) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_PXN|PT_MAIR_DEVICE) // [497] (for mmio)
.space (14*8)
.space (13*8)
.quad (kernelpt2 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry)

// Early page tables for identity mapping the kernel physical addresses.
// Once we've jumped to the higher half, this will no longer be used.
.balign 4096
physicalpt4:
.quad (physicalpt3 - KZERO) + (PT_PAGE) // [0] (for kernel)
.space (511*8)
.space (510*8)
.quad (physicalpt4 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry)

.balign 4096
physicalpt3:
.quad (0*2*GiB) + (PT_BLOCK|PT_AF|PT_AP_KERNEL_RW|PT_ISH|PT_UXN|PT_MAIR_NORMAL) // [0] (for kernel)
.space (511*8)
.space (510*8)
.quad (physicalpt3 - KZERO) + (PT_AF|PT_PAGE) // [511] (recursive entry)

.bss
.balign 4096
Expand Down
27 changes: 14 additions & 13 deletions aarch64/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,21 +100,13 @@ fn print_board_info() {
}

#[no_mangle]
pub extern "C" fn main9(dtb_ptr: u64) {
pub extern "C" fn main9(dtb_ptr: usize) {
trap::init();

// Parse the DTB before we set up memory so we can correctly map it
let dt = unsafe { DeviceTree::from_u64(dtb_ptr).unwrap() };

unsafe {
kalloc::free_pages(kmem::early_pages());

let dtb_phys = PhysAddr::new(dtb_ptr);
let edtb_phys = dtb_phys + dt.size() as u64;
vm::init(&mut KPGTBL, dtb_phys, edtb_phys);
vm::switch(&KPGTBL);
}
let dt = unsafe { DeviceTree::from_usize(dtb_ptr).unwrap() };

// Set up uart so we can log as early as possible
mailbox::init(&dt);
devcons::init(&dt);

Expand All @@ -123,12 +115,21 @@ pub extern "C" fn main9(dtb_ptr: u64) {
println!("DTB found at: {:#x}", dtb_ptr);
println!("midr_el1: {:?}", registers::MidrEl1::read());

// Map address space accurately using rust VM code to manage page tables
unsafe {
kalloc::free_pages(kmem::early_pages());

let dtb_phys = PhysAddr::from_virt(dtb_ptr as usize);
let edtb_phys = dtb_phys + dt.size() as u64;
vm::init(&mut KPGTBL, dtb_phys, edtb_phys);
vm::switch(&KPGTBL);
}

print_binary_sections();
print_physical_memory_map();
print_board_info();

// Dump out pagetables
kernel_root().print_tables();
kernel_root().print_recursive_tables();

println!("looping now");

Expand Down
Loading

0 comments on commit 8685e4b

Please sign in to comment.