Simple pagetable setup in Rust #27

Merged 12 commits on Oct 16, 2023
16 changes: 8 additions & 8 deletions Cargo.lock


2 changes: 1 addition & 1 deletion aarch64/Cargo.toml
@@ -6,4 +6,4 @@ edition = "2021"
 [dependencies]
 bitstruct = "0.1"
 port = { path = "../port" }
-num_enum = { version = "0.6.1", default-features = false }
+num_enum = { version = "0.7.0", default-features = false }
21 changes: 20 additions & 1 deletion aarch64/lib/kernel.ld
@@ -49,8 +49,27 @@ SECTIONS {
     *(COMMON)
     . = ALIGN(2097152);
   }
-  end = .;
+  ebss = .;
+
+  /* Reserve section for kernel heap. Align to 2MiB to allow us to map it
+     as a 2MiB page. */
+  . = ALIGN(2 * 1024 * 1024);
+  heap = .;
+  . += 64 * 1024 * 1024;
+  eheap = .;
+
+  /* Reserve section for early pagetables. Align to 2MiB to allow us to map
+     it as a 2MiB page. Note that this won't be needed once we transition to
+     recursive pagetables.
+     TODO: Just use the heap when we enable recursive pagetables? */
+  . = ALIGN(2 * 1024 * 1024);
+  early_pagetables = .;
+  . += 2 * 1024 * 1024;
+  eearly_pagetables = .;
+
+  end = .;
+  PROVIDE(end = .);
 
   /DISCARD/ : {
     *(.eh_frame .note.GNU-stack)
   }
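Reviewer sketch (hypothetical, not part of this diff): once the kmem.rs accessors below land, the reserved windows can be sanity-checked from Rust.

// Hypothetical sanity check using the kmem.rs accessors added below.
// The 2 MiB early-pagetable window divided into 4 KiB pages gives 512 pages.
fn early_pagetable_pool_pages() -> usize {
    (crate::kmem::eearly_pagetables_addr() - crate::kmem::early_pagetables_addr())
        / crate::vm::PAGE_SIZE_4K
}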
54 changes: 54 additions & 0 deletions aarch64/src/kalloc.rs
@@ -0,0 +1,54 @@
use crate::vm::{Page4K, PAGE_SIZE_4K};
use core::ptr;
use port::mcslock::{Lock, LockNode};

static FREE_LIST: Lock<FreeList> = Lock::new("kmem", FreeList { next: None });

/// An intrusive free list of 4KiB pages. Each free page stores the link to
/// the next free page in its own first bytes, so the allocator needs no
/// metadata beyond the list head.
#[repr(align(4096))]
struct FreeList {
    next: Option<ptr::NonNull<FreeList>>,
}
unsafe impl Send for FreeList {}

#[derive(Debug)]
pub enum Error {
    NoFreeBlocks,
}

impl FreeList {
    pub fn put(&mut self, page: &mut Page4K) {
        let ptr = (page as *mut Page4K).addr();
        assert_eq!(ptr % PAGE_SIZE_4K, 0, "freeing unaligned page");
        // Scribble over the old contents to help catch use-after-free.
        page.scribble();
        // Push the page onto the front of the list by writing the list node
        // into the page's own memory.
        let f = page as *mut Page4K as *mut FreeList;
        unsafe {
            ptr::write(f, FreeList { next: self.next });
        }
        self.next = ptr::NonNull::new(f);
    }

    pub fn get(&mut self) -> Result<&'static mut Page4K, Error> {
        let mut next = self.next.ok_or(Error::NoFreeBlocks)?;
        let next = unsafe { next.as_mut() };
        self.next = next.next;
        let pg = unsafe { &mut *(next as *mut FreeList as *mut Page4K) };
        // Clear the page before handing it out.
        pg.clear();
        Ok(pg)
    }
}

pub unsafe fn free_pages(pages: &mut [Page4K]) {
    static mut NODE: LockNode = LockNode::new();
    let mut lock = FREE_LIST.lock(unsafe { &NODE });
    let fl = &mut *lock;
    for page in pages.iter_mut() {
        fl.put(page);
    }
}

pub fn alloc() -> Result<&'static mut Page4K, Error> {
    static mut NODE: LockNode = LockNode::new();
    let mut lock = FREE_LIST.lock(unsafe { &NODE });
    let fl = &mut *lock;
    fl.get()
}
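A minimal usage sketch for reviewers (hypothetical call site; early_pages and the accessors come from kmem.rs below):

use crate::{kalloc, kmem};

fn init_early_allocator() {
    // Seed the free list with the pages reserved in kernel.ld.
    unsafe { kalloc::free_pages(kmem::early_pages()) };
    // Pages can now be handed out one at a time, already cleared.
    let _page = kalloc::alloc().expect("early page pool exhausted");
}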
187 changes: 187 additions & 0 deletions aarch64/src/kmem.rs
@@ -0,0 +1,187 @@
use crate::{param::KZERO, vm::Page4K};
use core::{
    fmt,
    iter::{Step, StepBy},
    mem,
    ops::{self, Range},
    slice,
};

// These map to definitions in kernel.ld. Zero-length arrays give us the
// addresses of the linker symbols without allocating any storage for them.
extern "C" {
    static etext: [u64; 0];
    static erodata: [u64; 0];
    static ebss: [u64; 0];
    static early_pagetables: [u64; 0];
    static eearly_pagetables: [u64; 0];
    static heap: [u64; 0];
    static eheap: [u64; 0];
}

pub fn text_addr() -> usize {
    0xffff_8000_0000_0000
}

pub fn etext_addr() -> usize {
    unsafe { etext.as_ptr().addr() }
}

pub fn erodata_addr() -> usize {
    unsafe { erodata.as_ptr().addr() }
}

pub fn ebss_addr() -> usize {
    unsafe { ebss.as_ptr().addr() }
}

pub fn heap_addr() -> usize {
    unsafe { heap.as_ptr().addr() }
}

pub fn eheap_addr() -> usize {
    unsafe { eheap.as_ptr().addr() }
}

pub fn early_pagetables_addr() -> usize {
    unsafe { early_pagetables.as_ptr().addr() }
}

pub fn eearly_pagetables_addr() -> usize {
    unsafe { eearly_pagetables.as_ptr().addr() }
}

#[derive(Clone, Copy, PartialEq, PartialOrd)]
#[repr(transparent)]
pub struct PhysAddr(u64);

impl PhysAddr {
    pub const fn new(value: u64) -> Self {
        PhysAddr(value)
    }

    pub const fn addr(&self) -> u64 {
        self.0
    }

    pub const fn to_virt(&self) -> usize {
        (self.0 as usize).wrapping_add(KZERO)
    }

    pub fn from_virt(a: usize) -> Self {
        Self((a - KZERO) as u64)
    }

    pub fn from_ptr<T>(a: *const T) -> Self {
        Self::from_virt(a.addr())
    }

    pub const fn to_ptr_mut<T>(&self) -> *mut T {
        self.to_virt() as *mut T
    }

    // Both rounding helpers assume `step` is a power of two.
    pub const fn round_up(&self, step: u64) -> PhysAddr {
        PhysAddr((self.0 + step - 1) & !(step - 1))
    }

    pub const fn round_down(&self, step: u64) -> PhysAddr {
        PhysAddr(self.0 & !(step - 1))
    }
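    // Worked example (reviewer note, not in this PR): with step = 0x1000,
    //   PhysAddr(0x3f00_1234).round_down(0x1000) == PhysAddr(0x3f00_1000)
    //   PhysAddr(0x3f00_1234).round_up(0x1000)   == PhysAddr(0x3f00_2000)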

    pub fn step_by_rounded(
        startpa: PhysAddr,
        endpa: PhysAddr,
        step_size: usize,
    ) -> StepBy<Range<Self>> {
        let startpa = startpa.round_down(step_size as u64);
        let endpa = endpa.round_up(step_size as u64);
        (startpa..endpa).step_by(step_size)
    }
}

impl ops::Add<u64> for PhysAddr {
    type Output = PhysAddr;

    fn add(self, offset: u64) -> PhysAddr {
        PhysAddr(self.0 + offset)
    }
}

/// Implementing Step lets PhysAddr be used in iterator ranges. Note that the
/// rounding of startpa (down) and endpa (up) happens in step_by_rounded above,
/// not here.
impl Step for PhysAddr {
    fn steps_between(&startpa: &Self, &endpa: &Self) -> Option<usize> {
        if startpa.0 <= endpa.0 {
            match endpa.0.checked_sub(startpa.0) {
                Some(result) => usize::try_from(result).ok(),
                None => None,
            }
        } else {
            None
        }
    }

    fn forward_checked(startpa: Self, count: usize) -> Option<Self> {
        startpa.0.checked_add(count as u64).map(PhysAddr)
    }

    fn backward_checked(startpa: Self, count: usize) -> Option<Self> {
        startpa.0.checked_sub(count as u64).map(PhysAddr)
    }
}

impl fmt::Debug for PhysAddr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "PhysAddr({:#016x})", self.0)
    }
}

unsafe fn page_slice_mut<'a>(pstart: *mut Page4K, pend: *mut Page4K) -> &'a mut [Page4K] {
    let ustart = pstart.addr();
    let uend = pend.addr();
    const PAGE_SIZE: usize = mem::size_of::<Page4K>();
    assert_eq!(ustart % PAGE_SIZE, 0, "page_slice_mut: unaligned start page");
    assert_eq!(uend % PAGE_SIZE, 0, "page_slice_mut: unaligned end page");
    assert!(ustart < uend, "page_slice_mut: bad range");

    let len = (uend - ustart) / PAGE_SIZE;
    unsafe { slice::from_raw_parts_mut(ustart as *mut Page4K, len) }
}

pub fn early_pages() -> &'static mut [Page4K] {
    let early_start = early_pagetables_addr() as *mut Page4K;
    let early_end = eearly_pagetables_addr() as *mut Page4K;
    unsafe { page_slice_mut(early_start, early_end) }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::vm;

    #[test]
    fn physaddr_step() {
        let startpa = PhysAddr::new(4096);
        let endpa = PhysAddr::new(4096 * 3);
        let pas =
            PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_4K).collect::<Vec<PhysAddr>>();
        assert_eq!(pas, [PhysAddr::new(4096), PhysAddr::new(4096 * 2)]);
    }

    #[test]
    fn physaddr_step_rounds_up_and_down() {
        let startpa = PhysAddr::new(9000); // Should round down to 8192
        let endpa = PhysAddr::new(5000 * 3); // 15000, should round up to 16384
        let pas =
            PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_4K).collect::<Vec<PhysAddr>>();
        assert_eq!(pas, [PhysAddr::new(4096 * 2), PhysAddr::new(4096 * 3)]);
    }

    #[test]
    fn physaddr_step_2m() {
        let startpa = PhysAddr::new(0x3f000000);
        let endpa = PhysAddr::new(0x3f000000 + 4 * 1024 * 1024);
        let pas =
            PhysAddr::step_by_rounded(startpa, endpa, vm::PAGE_SIZE_2M).collect::<Vec<PhysAddr>>();
        assert_eq!(pas, [PhysAddr::new(0x3f000000), PhysAddr::new(0x3f000000 + 2 * 1024 * 1024)]);
    }
}
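For context, a hypothetical call site (names assumed, not in this PR) showing the loop shape step_by_rounded enables when mapping a physical range with 2 MiB blocks:

// Hypothetical mapping loop: walk a physical range in 2 MiB strides.
fn map_device_range(startpa: PhysAddr, endpa: PhysAddr) {
    for pa in PhysAddr::step_by_rounded(startpa, endpa, crate::vm::PAGE_SIZE_2M) {
        // Map a 2 MiB block at `pa` here.
    }
}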