From 69f84c39db3be0fd1f5c60259dd5b13328ea736a Mon Sep 17 00:00:00 2001
From: Patrick Sabanic
Date: Mon, 3 Jun 2024 11:44:30 +0000
Subject: [PATCH] Add basic monitor interactions

---
 Cargo.lock                                 |  12 +-
 Cargo.toml                                 |   3 +
 Makefile                                   |   2 +-
 cpuarch/src/vmsa.rs                        |   7 +-
 kernel/Cargo.toml                          |   7 +
 kernel/src/attestation/mod.rs              |   9 +
 kernel/src/attestation/monitor.rs          |  35 ++
 kernel/src/attestation/policy.rs           |   0
 kernel/src/attestation/process.rs          |  12 +
 kernel/src/cpu/percpu.rs                   |   4 +-
 kernel/src/greq/services.rs                |   2 +-
 kernel/src/lib.rs                          |   3 +
 kernel/src/process_manager/call_handler.rs |  54 +++
 kernel/src/process_manager/mod.rs          |   2 +
 kernel/src/process_manager/process.rs      | 354 ++++++++++++++++
 kernel/src/protocols/core.rs               |   2 +-
 kernel/src/protocols/mod.rs                |  10 +-
 kernel/src/protocols/process.rs            |  29 ++
 kernel/src/requests.rs                     |   2 +
 kernel/src/sp_pagetable/mod.rs             | 459 +++++++++++++++++++++
 kernel/src/sp_pagetable/tmp_mapping.rs     | 104 +++++
 21 files changed, 1094 insertions(+), 18 deletions(-)
 create mode 100644 kernel/src/attestation/mod.rs
 create mode 100644 kernel/src/attestation/monitor.rs
 create mode 100644 kernel/src/attestation/policy.rs
 create mode 100644 kernel/src/attestation/process.rs
 create mode 100644 kernel/src/process_manager/call_handler.rs
 create mode 100644 kernel/src/process_manager/mod.rs
 create mode 100644 kernel/src/process_manager/process.rs
 create mode 100644 kernel/src/protocols/process.rs
 create mode 100644 kernel/src/sp_pagetable/mod.rs
 create mode 100644 kernel/src/sp_pagetable/tmp_mapping.rs

diff --git a/Cargo.lock b/Cargo.lock
index 26eb4b46d..c2311f07e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -411,9 +411,9 @@ dependencies = [
 
 [[package]]
 name = "getrandom"
-version = "0.2.12"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
+checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
 dependencies = [
  "cfg-if",
  "libc",
@@ -559,9 +559,9 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.152"
+version = "0.2.154"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
+checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346"
 
 [[package]]
 name = "libfuzzer-sys"
@@ -601,9 +601,9 @@ dependencies = [
 
 [[package]]
 name = "num-traits"
-version = "0.2.17"
+version = "0.2.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
+checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
 dependencies = [
  "autocfg",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index 6a9f8ea9c..9dc47080e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -26,6 +26,7 @@ svsm = { path = "kernel" }
 elf = { path = "elf" }
 libmstpm = { path = "libmstpm" }
 syscall = { path = "syscall" }
+monitorcrypto = { path = "monitorcrypto" }
 
 # crates.io
 aes-gcm = { version = "0.10.3", default-features = false }
@@ -45,6 +46,8 @@ uuid = "1.6.1"
 # Add the derive feature by default because all crates use it.
zerocopy = { version = "0.7.32", features = ["derive"] } +#rsa = { version= "0.9.6", default-features = false, features = [] } +#getrandom ={version="0.2.12", default-features = false, features = ["rdrand"]} # other repos packit = { git = "https://github.com/coconut-svsm/packit", version = "0.1.1" } diff --git a/Makefile b/Makefile index 47b67a012..d4a4a5361 100644 --- a/Makefile +++ b/Makefile @@ -113,7 +113,7 @@ bin/stage2.bin: bin objcopy -O binary ${STAGE2_ELF} $@ bin/svsm-kernel.elf: bin - cargo build ${CARGO_ARGS} ${SVSM_ARGS} --bin svsm + cargo build --manifest-path kernel/Cargo.toml ${CARGO_ARGS} ${SVSM_ARGS} --bin svsm objcopy -O elf64-x86-64 --strip-unneeded ${SVSM_KERNEL_ELF} $@ bin/test-kernel.elf: bin diff --git a/cpuarch/src/vmsa.rs b/cpuarch/src/vmsa.rs index e078229e1..934d8dd5b 100644 --- a/cpuarch/src/vmsa.rs +++ b/cpuarch/src/vmsa.rs @@ -7,7 +7,7 @@ // AE Exitcodes // Table 15-35, AMD64 Architecture Programmer’s Manual, Vol. 2 #[repr(u64)] -#[derive(Clone, Copy, Default, Debug)] +#[derive(Clone, Copy, Default, Debug, PartialEq)] #[allow(dead_code, non_camel_case_types)] pub enum GuestVMExit { MC = 0x52, @@ -44,7 +44,7 @@ pub enum GuestVMExit { } #[repr(C, packed)] -#[derive(Debug, Default, Clone, Copy)] +#[derive(Debug, Default, Clone, Copy, PartialEq)] pub struct VMSASegment { pub selector: u16, pub flags: u16, @@ -53,7 +53,7 @@ pub struct VMSASegment { } #[repr(C, packed)] -#[derive(Debug)] +#[derive(Debug, Copy, Clone, PartialEq)] pub struct VMSA { pub es: VMSASegment, pub cs: VMSASegment, @@ -280,6 +280,7 @@ impl Default for VMSA { } } + impl VMSA { pub fn enable(&mut self) { self.efer |= 1u64 << 12; diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 8e8c01267..adde82e52 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -25,6 +25,13 @@ syscall.workspace = true aes-gcm = { workspace = true, features = ["aes", "alloc"] } bitflags.workspace = true +#getrandom.workspace = true +#num-bigint = { path="../../num-bigint", version = "0.8.4", default-features = false, features = ["prime"], package = "num-bigint-dig"} +#num-traits = { version = "0.2.19", default-features = false, features = [] } +#getrandom = { version = "0.2.15", default-features = false, features = ["rdrand"]} +#rsa = { version= "0.9.6", default-features = false, features = [] } +#rsa = { path = "../../RSA", default-features = false, features = [] } +#sha2 = { version="0.10.8", default_features = false, features = ["force-soft"]} gdbstub = { workspace = true, optional = true } gdbstub_arch = { workspace = true, optional = true } igvm_defs = { workspace = true, features = ["unstable"] } diff --git a/kernel/src/attestation/mod.rs b/kernel/src/attestation/mod.rs new file mode 100644 index 000000000..23ab80c3b --- /dev/null +++ b/kernel/src/attestation/mod.rs @@ -0,0 +1,9 @@ +pub mod monitor; +pub mod process; +pub mod policy; +//pub mod ClientExchange { + + + + +//} \ No newline at end of file diff --git a/kernel/src/attestation/monitor.rs b/kernel/src/attestation/monitor.rs new file mode 100644 index 000000000..ee5ebe6be --- /dev/null +++ b/kernel/src/attestation/monitor.rs @@ -0,0 +1,35 @@ +use crate::{address::PhysAddr, greq::services::{get_regular_report, REPORT_RESPONSE_SIZE}}; +use crate::greq::pld_report::SnpReportResponse; +use crate::protocols::errors::SvsmReqError; +use crate::protocols::RequestParams; +use crate::mm::PerCPUPageMappingGuard; + +pub fn attest_monitor(params: &mut RequestParams) -> Result<(), SvsmReqError>{ + let mut rep: [u8; REPORT_RESPONSE_SIZE] = 
[0u8;REPORT_RESPONSE_SIZE]; + + rep[0] = 1; + log::info!("Requesting Monitor Attestation Report"); + let rep_size = get_regular_report(&mut rep)?; + + if params.rdx == 0 { + /* Here we only query for the size of the report */ + params.rdx = rep_size.try_into().unwrap(); + return Ok(()); + } + + params.rdx = rep_size.try_into().unwrap(); + + log::info!("Size of Report: {rep_size}"); + let r = SnpReportResponse::try_from_as_ref(&mut rep)?; + log::info!("Report: {:?}\n",r); + log::info!("Report: {:?}\n",rep); + //TODO: Check if address is valid for this request + let target_address = PhysAddr::from(params.rcx); + let mapped_target_page = PerCPUPageMappingGuard::create_4k(target_address).unwrap(); + let target = unsafe {mapped_target_page.virt_addr().as_mut_ptr::<[u8;4096]>().as_mut().unwrap()}; + target[0..rep_size].copy_from_slice(&rep); + + + Ok(()) +} + diff --git a/kernel/src/attestation/policy.rs b/kernel/src/attestation/policy.rs new file mode 100644 index 000000000..e69de29bb diff --git a/kernel/src/attestation/process.rs b/kernel/src/attestation/process.rs new file mode 100644 index 000000000..36d2d2fe8 --- /dev/null +++ b/kernel/src/attestation/process.rs @@ -0,0 +1,12 @@ +use crate::process_manager::process::TrustedProcess; + +pub fn attest_process() -> bool { + log::info!("attest(): Attesting Monitor"); + true +} + +pub fn hash_process(process: &mut TrustedProcess) { + log::info!("Hash of Process is: 0"); + process.hash = [0u8;32]; + +} \ No newline at end of file diff --git a/kernel/src/cpu/percpu.rs b/kernel/src/cpu/percpu.rs index 43096ae1c..d6be2882a 100644 --- a/kernel/src/cpu/percpu.rs +++ b/kernel/src/cpu/percpu.rs @@ -193,7 +193,7 @@ impl GuestVmsaRef { #[derive(Debug)] pub struct PerCpuShared { - guest_vmsa: SpinLock, + pub guest_vmsa: SpinLock, online: AtomicBool, } @@ -245,7 +245,7 @@ impl PerCpuShared { pub struct PerCpuUnsafe { shared: PerCpuShared, private: RefCell, - ghcb: *mut GHCB, + pub ghcb: *mut GHCB, init_stack: Option, ist: IstStacks, diff --git a/kernel/src/greq/services.rs b/kernel/src/greq/services.rs index 0d10342f0..ebf86cb02 100644 --- a/kernel/src/greq/services.rs +++ b/kernel/src/greq/services.rs @@ -17,7 +17,7 @@ use crate::{ use core::mem::size_of; const REPORT_REQUEST_SIZE: usize = size_of::(); -const REPORT_RESPONSE_SIZE: usize = size_of::(); +pub const REPORT_RESPONSE_SIZE: usize = size_of::(); fn get_report(buffer: &mut [u8], certs: Option<&mut [u8]>) -> Result { let request: &SnpReportRequest = SnpReportRequest::try_from_as_ref(buffer)?; diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 9dacc338a..ef71446c5 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -42,6 +42,9 @@ pub mod utils; #[cfg(all(feature = "mstpm", not(test)))] pub mod vtpm; +pub mod attestation; +pub mod process_manager; +pub mod sp_pagetable; #[test] fn test_nop() {} diff --git a/kernel/src/process_manager/call_handler.rs b/kernel/src/process_manager/call_handler.rs new file mode 100644 index 000000000..0c71c0b1e --- /dev/null +++ b/kernel/src/process_manager/call_handler.rs @@ -0,0 +1,54 @@ +//use crate::address::PhysAddr; +use crate::protocols::errors::SvsmReqError; +use crate::protocols::RequestParams; +use crate::attestation; +use crate::process_manager::process::TrustedProcessType; + +const MONITOR_INIT: u32 = 0; +const ATTEST_MONITOR: u32 = 1; +//const LOAD_POLICY: u32 = 2; +const CREATE_ZYGOTE: u32 = 4; +const DELETE_ZYGOTE: u32 = 5; +const CREATE_TRUSTLET: u32 = 6; +const DELETE_TRUSTLET: u32 = 7; + +pub fn attest_monitor(params: &mut RequestParams) -> 
Result<(), SvsmReqError>{ + attestation::monitor::attest_monitor(params) +} +pub fn monitor_init(_params: &mut RequestParams) -> Result<(), SvsmReqError>{ + + log::info!("Initilization Monitor"); + super::process::PROCESS_STORE.init(10); + crate::sp_pagetable::set_ecryption_mask_address_size(); + log::info!("Initilization Done"); + Ok(()) +} + +pub fn create_zygote(params: &mut RequestParams) -> Result<(), SvsmReqError>{ + super::process::create_trusted_process(params,TrustedProcessType::Zygote) +} + +pub fn delete_zygote(params: &mut RequestParams) -> Result<(), SvsmReqError> { + super::process::delete_trusted_process(params) +} + +pub fn create_trustlet(params: &mut RequestParams) -> Result<(), SvsmReqError> { + super::process::create_trusted_process(params, TrustedProcessType::Trustlet) +} + +pub fn delete_trustlet(params: &mut RequestParams) -> Result<(), SvsmReqError> { + super::process::delete_trusted_process(params) +} + + +pub fn monitor_call_handler(request: u32, params: &mut RequestParams) -> Result<(), SvsmReqError> { + match request { + MONITOR_INIT => monitor_init(params), + ATTEST_MONITOR => attest_monitor(params), + CREATE_ZYGOTE => create_zygote(params), + DELETE_ZYGOTE => delete_zygote(params), + CREATE_TRUSTLET => create_trustlet(params), + DELETE_TRUSTLET => delete_trustlet(params), + _ => Err(SvsmReqError::unsupported_call()), + } +} \ No newline at end of file diff --git a/kernel/src/process_manager/mod.rs b/kernel/src/process_manager/mod.rs new file mode 100644 index 000000000..0e71ee308 --- /dev/null +++ b/kernel/src/process_manager/mod.rs @@ -0,0 +1,2 @@ +pub mod call_handler; +pub mod process; \ No newline at end of file diff --git a/kernel/src/process_manager/process.rs b/kernel/src/process_manager/process.rs new file mode 100644 index 000000000..f03b189fb --- /dev/null +++ b/kernel/src/process_manager/process.rs @@ -0,0 +1,354 @@ +extern crate alloc; + +use core::cell::UnsafeCell; +use alloc::vec::Vec; +use cpuarch::vmsa::VMSASegment; +use crate::address::PhysAddr; +use crate::cpu::percpu::this_cpu_shared; +use crate::cpu::percpu::this_cpu_unsafe; +use crate::mm::SVSM_PERCPU_VMSA_BASE; +//use crate::attestation::process; +use crate::protocols::errors::SvsmReqError; +use crate::protocols::RequestParams; + +//Testing +use crate::sp_pagetable::*; +use crate::sev::RMPFlags; +use crate::sev::rmp_adjust; +//use crate::cpu::percpu::this_cpu_mut; +use crate::cpu::percpu::PERCPU_VMSAS; +use crate::cpu::percpu::this_cpu; +use core::mem::replace; +use crate::mm::PAGE_SIZE; +use crate::utils::zero_mem_region; +use cpuarch::vmsa::VMSA; +use crate::sev::utils::rmp_revoke_guest_access; +use crate::cpu::flush_tlb_global_sync; +use crate::mm::virt_to_phys; +use crate::mm::alloc::allocate_zeroed_page; +use crate::types::PageSize; +use crate::address::VirtAddr; +use crate::mm::PerCPUPageMappingGuard; +use crate::sev::utils::rmp_set_guest_vmsa; +//use crate::protocols::core::core_create_vcpu_error_restore; // + +trait FromVAddr { + fn from_virt_addr(v: VirtAddr) -> &'static mut VMSA; +} + +impl FromVAddr for VMSA { + fn from_virt_addr(v: VirtAddr) -> &'static mut VMSA{ + unsafe { v.as_mut_ptr::().as_mut().unwrap() } + } +} + +#[derive(Clone,Copy,Debug,PartialEq)] +pub enum TrustedProcessType { + Undefined, + Zygote, + Trustlet, +} +pub const UNDEFINED_PROCESS: u32 = 0; +pub const ZYGOTE_PROCESS: u32 = 1; +pub const TRUSTLET_PROCESS: u32 = 2; + +pub static PROCESS_STORE: TrustedProcessStore = TrustedProcessStore::new(); + +#[derive(Debug)] +pub struct TrustedProcessStore{ + 
processes: UnsafeCell>, +} + +unsafe impl Sync for TrustedProcessStore {} + +impl TrustedProcessStore { + const fn new() -> Self { + Self { + processes: UnsafeCell::new(Vec::new()), + } + } + pub fn push(&self, process: TrustedProcess){ + let ptr: &mut Vec = unsafe { self.processes.get().as_mut().unwrap() }; + ptr.push(process); + } + pub fn init(&self, size: u32){ + let empty_process = TrustedProcess::empty(); + for _ in 0..size { + self.push(empty_process); + } + } + pub fn insert(&self, p: TrustedProcess) -> i64 { + let ptr: &mut Vec = unsafe { self.processes.get().as_mut().unwrap() }; + for i in 0..(ptr.len()) { + if ptr[i].process_type == TrustedProcessType::Undefined { + ptr[i] = p; + return i.try_into().unwrap(); + } + } + -1 + } + + pub fn get(&self, pid: ProcessID) -> &mut TrustedProcess { + let ptr = unsafe { self.processes.get().as_mut().unwrap() }; + &mut ptr[pid.0] + } + +} + +#[derive(Clone,Copy,Debug)] +pub struct ProcessData(PhysAddr); + +impl ProcessData { + pub fn dublicate_read_only(&self) -> ProcessData{ + ProcessData(self.0) + } + pub fn append_data(&self){ + + } +} + +#[derive(Clone,Copy,Debug)] +pub struct ProcessID(usize); + +#[derive(Clone,Copy,Debug)] +pub struct TrustedProcess { + process_type: TrustedProcessType, + data: ProcessData, + len: u64, + pub hash: [u8; 32], +} +impl TrustedProcess { + + const fn new(process_type: TrustedProcessType, data: PhysAddr, len: u64, hash: [u8; 32])->Self{ + Self {process_type, data: ProcessData(data), len, hash} + } + + pub fn zygote(d: PhysAddr, len: u64) -> Self{ + let hash = [0u8;32]; + let mut process = Self::new(TrustedProcessType::Zygote, d, len, hash); + super::super::attestation::process::hash_process(&mut process); + process + } + + fn dublicate(pid: ProcessID) -> TrustedProcess { + let process = PROCESS_STORE.get(pid); + TrustedProcess { process_type: TrustedProcessType::Trustlet, data: process.data.dublicate_read_only(), len: process.len, hash: process.hash } + } + + pub fn trustlet(parent: ProcessID, _d: PhysAddr, _len: u64) -> Self{ + let _hash = [0u8;32]; + let mut trustlet = TrustedProcess::dublicate(parent); + super::super::attestation::process::hash_process(&mut trustlet); + trustlet + } + + pub fn empty() -> Self { + Self::new(TrustedProcessType::Undefined, PhysAddr::from(0u64), 0, [0u8;32]) + } + + pub fn delete(&self) -> bool { + true + } + +} + +pub fn check_vmsa_ind(new: &VMSA, sev_features: u64, svme_mask: u64, vmpl_level: u64) -> bool { + new.vmpl == vmpl_level as u8 + && new.efer & svme_mask == svme_mask + && new.sev_features == sev_features +} + + +pub fn create_tmp_page_tabel() -> (*mut PageTableReference, PhysAddr) { + set_ecryption_mask_address_size(); + log::info!("Creating tmp page table"); + let ref_page = allocate_zeroed_page().unwrap(); + let _ref_page_phy = virt_to_phys(ref_page); + log::info!("Allocating ref page"); + rmp_adjust(ref_page, RMPFlags::VMPL2 | RMPFlags::VMPL3 | RMPFlags::VMPL1 | RMPFlags::VMPL0 | RMPFlags::RWX, PageSize::Regular).unwrap(); + log::info!("Allocating table pages"); + let table_page = allocate_zeroed_page().unwrap(); + let table_page_phy = virt_to_phys(table_page); + rmp_adjust(table_page, RMPFlags::VMPL2 | RMPFlags::VMPL3 | RMPFlags::VMPL1 | RMPFlags::VMPL0 | RMPFlags::RWX, PageSize::Regular).unwrap(); + let mut sub_pages: [VirtAddr;5] = [VirtAddr::from(0u64),VirtAddr::from(0u64),VirtAddr::from(0u64),VirtAddr::from(0u64),VirtAddr::from(0u64)]; + let mut sub_pages_phy: [PhysAddr; 5] = 
[PhysAddr::from(0u64),PhysAddr::from(0u64),PhysAddr::from(0u64),PhysAddr::from(0u64),PhysAddr::from(0u64)]; + for i in 0..5 { + sub_pages[i] = allocate_zeroed_page().unwrap(); + sub_pages_phy[i] = virt_to_phys(sub_pages[i]); + rmp_adjust(sub_pages[i], RMPFlags::VMPL2 | RMPFlags::VMPL3 | RMPFlags::VMPL1 | RMPFlags::VMPL0 | RMPFlags::RWX, PageSize::Regular).unwrap(); + } + let r = unsafe { ref_page.as_mut_ptr::().as_mut().unwrap() }; + r.init(table_page_phy, &sub_pages_phy); + r.mount(); + log::info!("Done with tmp page table creation process"); + (ref_page.as_mut_ptr::(), table_page_phy) +} + +pub fn create_trusted_process(params: &mut RequestParams, _t: TrustedProcessType) -> Result<(), SvsmReqError>{ + + /* Test code for the execution withint a different VMPL level (only uses Monitor Memory)*/ + + log::info!("VMSA host: \n{:?}", unsafe { SVSM_PERCPU_VMSA_BASE.as_mut_ptr::().as_mut().unwrap() } ); + + let tmp_vmsa_store = allocate_zeroed_page().unwrap(); + let vmsa_copy = unsafe { tmp_vmsa_store.as_mut_ptr::().as_mut().unwrap()}; + *vmsa_copy = unsafe { *SVSM_PERCPU_VMSA_BASE.as_mut_ptr::().as_mut().unwrap() }; + + let paddr_vmsa =virt_to_phys(allocate_zeroed_page().unwrap()); + let paddr_stack = virt_to_phys(allocate_zeroed_page().unwrap()); + let tmp = allocate_zeroed_page().unwrap(); + let t2 = unsafe { tmp.as_mut_ptr::<[u8;4096]>().as_mut().unwrap()}; + let t: [u8; 6] = [0x0f, 0xa2, 0xeb, 0x00, 0xeb, 0xfe]; //cpuid; jmp +0;jmp -2; + for i in 0..6 { + t2[i] = t[i]; + } + let paddr_pages = virt_to_phys(tmp); + // + + log::info!("Allocating new page table"); + let page_table = create_tmp_page_tabel(); + let page_table_phy = page_table.1; + let page_table = unsafe { page_table.0.as_mut().unwrap() }; + + page_table.map_4k_page(VirtAddr::from(0x8000000000u64), paddr_pages, PageFlags::exec()).unwrap(); + page_table.map_4k_page(VirtAddr::from(0x8000000000u64)+PAGE_SIZE, paddr_stack, PageFlags::data()).unwrap(); + + page_table.dump(); + + let mapping_guard = PerCPUPageMappingGuard::create_4k(paddr_pages)?; + let vaddr_pages = mapping_guard.virt_addr(); + let mapping_guard = PerCPUPageMappingGuard::create_4k(paddr_stack)?; + let vaddr_stack = mapping_guard.virt_addr(); + let mapping_guard = PerCPUPageMappingGuard::create_4k(paddr_vmsa)?; + let vaddr_vmsa = mapping_guard.virt_addr(); + + + flush_tlb_global_sync(); + let vmsa = VMSA::from_virt_addr(vaddr_vmsa); + zero_mem_region(vaddr_vmsa, vaddr_vmsa + PAGE_SIZE); + + + let locked = this_cpu_shared().guest_vmsa.lock(); + let vmsa_ptr = unsafe { SVSM_PERCPU_VMSA_BASE.as_mut_ptr::().as_mut().unwrap() }; + _ = replace(vmsa,*vmsa_ptr); + drop(locked); + //log::info!("Changing VMPL level of memory"); + + //------ Breaks the kernel (prevents from booting); Does not break anymore in release mode + rmp_adjust(vaddr_pages, RMPFlags::VMPL3 | RMPFlags::RWX, PageSize::Regular)?; + rmp_adjust(vaddr_stack, RMPFlags::VMPL3 | RMPFlags::RWX, PageSize::Regular)?; + rmp_adjust(vaddr_vmsa, RMPFlags::VMPL3 | RMPFlags::RWX, PageSize::Regular)?; + rmp_adjust(vaddr_pages, RMPFlags::VMPL2 | RMPFlags::RWX, PageSize::Regular)?; + rmp_adjust(vaddr_vmsa, RMPFlags::VMPL2 | RMPFlags::VMPL3 | RMPFlags::VMPL1 | RMPFlags::VMPL0 | RMPFlags::VMSA, PageSize::Regular)?; + + + + flush_tlb_global_sync(); + + rmp_set_guest_vmsa(vaddr_vmsa)?; + rmp_revoke_guest_access(vaddr_vmsa, PageSize::Regular)?; + rmp_adjust( + vaddr_vmsa, + RMPFlags::VMPL3 | RMPFlags::VMSA, + PageSize::Regular, + )?; + let vmsa = VMSA::from_virt_addr(vaddr_vmsa); + zero_mem_region(vaddr_vmsa, vaddr_vmsa + 
PAGE_SIZE); + let locked = this_cpu_shared().guest_vmsa.lock(); + let vmsa_ptr = unsafe { SVSM_PERCPU_VMSA_BASE.as_mut_ptr::().as_mut().unwrap() }; + _ = replace(vmsa,*vmsa_ptr); + drop(locked); + + vmsa.vmpl = 3; + vmsa.cr3 = u64::from(page_table_phy); + vmsa.rbp = u64::from(0x8000000000u64)+2*4096-1; + vmsa.rsp = u64::from(0x8000000000u64)+2*4096-1; + vmsa.efer = vmsa.efer | 1u64 << 12; + vmsa.rip = u64::from(0x8000000000u64); + vmsa.sev_features = vmsa_ptr.sev_features | 4; // VC Reflection feature + + + log::info!("Trustlet VMSA: {:?}", vmsa); + + let svme_mask: u64 = 1u64 << 12; + if !check_vmsa_ind(vmsa, params.sev_features | 4, svme_mask,RMPFlags::VMPL3.bits()) { + log::info!("VMSA Check failed"); + log::info!("Bits: {}",vmsa.vmpl == RMPFlags::VMPL3.bits() as u8); + log::info!("Efer & vsme_mask: {}", vmsa.efer & svme_mask == svme_mask); + log::info!("SEV features: {}", vmsa.sev_features == params.sev_features); + if vmsa.efer & svme_mask == svme_mask { + PERCPU_VMSAS.unregister(paddr_vmsa, false).unwrap(); + //core_create_vcpu_error_restore(vaddr_vmsa)?; + return Err(SvsmReqError::invalid_parameter()); + } + } + + + let apic_id = this_cpu().get_apic_id(); + PERCPU_VMSAS.register(paddr_vmsa, apic_id, true)?; + + assert!(PERCPU_VMSAS.set_used(paddr_vmsa) == Some(apic_id)); + unsafe {(*(*this_cpu_unsafe()).ghcb).ap_create(paddr_vmsa,u64::from(apic_id), 3, params.sev_features | 4)?} + //this_cpu_mut().ghcb_unsafe().ap_create(paddr_vmsa,u64::from(apic_id), 3, params.sev_features | 4)?; + log::info!("Run in VMPL3 was successfull"); + log::info!("VMSA host (after execution): \n{:?}", unsafe { SVSM_PERCPU_VMSA_BASE.as_mut_ptr::().as_mut().unwrap() } ); + + let vmsa_end_res = *vmsa_copy == unsafe { *SVSM_PERCPU_VMSA_BASE.as_mut_ptr::().as_mut().unwrap() }; + log::info!("VMSA comparison: {}", vmsa_end_res); + + return Ok(()); + + /* End of Test code */ + /* Start of actual Trustlet creation */ + /*match _t { + TrustedProcessType::Undefined => panic!("Invalid Creation Request"), + TrustedProcessType::Zygote => { + + log::info!("create_trusted_process(): Creating and registering Zygote"); + let len = params.rcx; + let zygote_address = PhysAddr::from(params.r8); + let z: TrustedProcess = TrustedProcess::zygote(zygote_address, len); + let res = PROCESS_STORE.insert(z); + // if res < 0 { + // params.rcx = u64::from_ne_bytes(res.to_ne_bytes()); + // } + params.rcx = u64::from_ne_bytes(res.to_ne_bytes()); + Ok(()) + }, + TrustedProcessType::Trustlet => { + + log::info!("create_trusted_process(): Creating and registering Trustlet"); + let len = params.rcx; + let _trustlet_address = PhysAddr::from(params.r8); + let trustlet = TrustedProcess::trustlet(ProcessID(params.rdx as usize), PhysAddr::from(params.r8), len); + if trustlet.process_type == TrustedProcessType::Undefined { + params.rcx = u64::from_ne_bytes((-1i64).to_ne_bytes()); + return Ok(()); + } + + let res = PROCESS_STORE.insert(trustlet); + params.rcx = u64::from_ne_bytes(res.to_ne_bytes()); + Ok(()) + + }, + }*/ +} + +pub fn dublicate_trusted_process(_params: &mut RequestParams) -> Result<(), SvsmReqError> { + todo!() +} + +pub fn append_trusted_process(_params: &mut RequestParams) -> Result<(), SvsmReqError> { + todo!() +} + +pub fn delete_trusted_process(params: &mut RequestParams) -> Result<(), SvsmReqError> { + let process_id = ProcessID(params.rcx as usize); + let process = PROCESS_STORE.get(process_id); + process.delete(); + Ok(()) +} + +pub fn attest_trusted_process(_params: &mut RequestParams) -> Result<(), SvsmReqError> { + todo!() 
+} \ No newline at end of file diff --git a/kernel/src/protocols/core.rs b/kernel/src/protocols/core.rs index f2536f1c8..b6544284b 100644 --- a/kernel/src/protocols/core.rs +++ b/kernel/src/protocols/core.rs @@ -54,7 +54,7 @@ struct PValidateRequest { resv: u32, } -fn core_create_vcpu_error_restore(paddr: Option, vaddr: Option) { +pub fn core_create_vcpu_error_restore(paddr: Option, vaddr: Option) { if let Some(v) = vaddr { if let Err(err) = rmp_clear_guest_vmsa(v) { log::error!("Failed to restore page permissions: {:#?}", err); diff --git a/kernel/src/protocols/mod.rs b/kernel/src/protocols/mod.rs index 0f0b687d7..e1c694083 100644 --- a/kernel/src/protocols/mod.rs +++ b/kernel/src/protocols/mod.rs @@ -8,20 +8,22 @@ pub mod core; pub mod errors; #[cfg(all(feature = "mstpm", not(test)))] pub mod vtpm; +pub mod process; use cpuarch::vmsa::{GuestVMExit, VMSA}; // SVSM protocols pub const SVSM_CORE_PROTOCOL: u32 = 0; pub const SVSM_VTPM_PROTOCOL: u32 = 2; +pub const SVSM_PROCESS_PROTOCOL: u32 = 10; #[derive(Debug, Default, Clone, Copy)] pub struct RequestParams { pub guest_exit_code: GuestVMExit, - sev_features: u64, - rcx: u64, - rdx: u64, - r8: u64, + pub sev_features: u64, + pub rcx: u64, + pub rdx: u64, + pub r8: u64, } impl RequestParams { diff --git a/kernel/src/protocols/process.rs b/kernel/src/protocols/process.rs new file mode 100644 index 000000000..ec6a46f63 --- /dev/null +++ b/kernel/src/protocols/process.rs @@ -0,0 +1,29 @@ +#![allow(unused_imports)] +use crate::address::{Address, PhysAddr, VirtAddr}; +use crate::cpu::flush_tlb_global_sync; +use crate::cpu::percpu::{this_cpu_shared, PERCPU_AREAS, PERCPU_VMSAS}; +use crate::cpu::vmsa::{vmsa_mut_ref_from_vaddr, vmsa_ref_from_vaddr}; +use crate::greq::pld_report::{AttestationReport, SnpReportResponse}; +use crate::greq::services::{get_extended_report, get_regular_report, REPORT_RESPONSE_SIZE}; +use crate::locking::RWLock; +use crate::mm::virtualrange::{VIRT_ALIGN_2M, VIRT_ALIGN_4K}; +use crate::mm::PerCPUPageMappingGuard; +use crate::mm::{valid_phys_address, writable_phys_addr, GuestPtr}; +use crate::{attestation, println, process_manager}; +use crate::protocols::errors::SvsmReqError; +use crate::protocols::RequestParams; +use crate::requests::SvsmCaa; +use crate::sev::utils::{ + pvalidate, rmp_clear_guest_vmsa, rmp_grant_guest_access, rmp_revoke_guest_access, + rmp_set_guest_vmsa, PvalidateOp, RMPFlags, SevSnpError, +}; +use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M}; +use crate::utils::zero_mem_region; +use cpuarch::vmsa::VMSA; + + + + +pub fn process_protocol_request(request: u32, params: &mut RequestParams) -> Result<(), SvsmReqError> { + process_manager::call_handler::monitor_call_handler(request, params) +} diff --git a/kernel/src/requests.rs b/kernel/src/requests.rs index 2161c0948..2b58e843b 100644 --- a/kernel/src/requests.rs +++ b/kernel/src/requests.rs @@ -15,6 +15,7 @@ use crate::protocols::errors::{SvsmReqError, SvsmResultCode}; #[cfg(all(feature = "mstpm", not(test)))] use crate::protocols::{vtpm::vtpm_protocol_request, SVSM_VTPM_PROTOCOL}; use crate::protocols::{RequestParams, SVSM_CORE_PROTOCOL}; +use crate::protocols::{process::process_protocol_request, SVSM_PROCESS_PROTOCOL}; use crate::types::GUEST_VMPL; use crate::utils::halt; use cpuarch::vmsa::GuestVMExit; @@ -97,6 +98,7 @@ fn request_loop_once( SVSM_CORE_PROTOCOL => core_protocol_request(request, params).map(|_| true), #[cfg(all(feature = "mstpm", not(test)))] SVSM_VTPM_PROTOCOL => vtpm_protocol_request(request, params).map(|_| true), + 
SVSM_PROCESS_PROTOCOL => process_protocol_request(request, params).map(|_| true), _ => Err(SvsmReqError::unsupported_protocol()), } } diff --git a/kernel/src/sp_pagetable/mod.rs b/kernel/src/sp_pagetable/mod.rs new file mode 100644 index 000000000..ba5a4f9bd --- /dev/null +++ b/kernel/src/sp_pagetable/mod.rs @@ -0,0 +1,459 @@ +#![allow(non_camel_case_types)] +#![allow(unused_imports)] +use crate::acpi::tables; +use crate::address::{Address, PhysAddr, VirtAddr}; +use crate::cpu::control_regs::write_cr3; +use crate::cpu::cpuid::cpuid_table; +use crate::cpu::features::{cpu_has_nx, cpu_has_pge}; +use crate::cpu::flush_tlb_global_sync; +use crate::error::SvsmError; +use crate::locking::{LockGuard, SpinLock}; +use crate::mm::alloc::{allocate_zeroed_page, free_page}; +use crate::mm::vm::Mapping; +use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED}; +use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M}; +use crate::utils::immut_after_init::ImmutAfterInitCell; +use aes_gcm::aead::consts::True; +use aes_gcm::Error; +use alloc::borrow::ToOwned; +use bitflags::{bitflags, Flag, Flags}; +use core::iter::Map; +use core::ops::{Deref, DerefMut, Index, IndexMut}; +use core::{cmp, ptr}; +use alloc::string::{String, ToString}; +use super::sp_pagetable::tmp_mapping::TemporaryPageMapping; +//use crate::mm::{PerCPUPageMappingGuard}; + + +pub mod tmp_mapping; + +extern crate alloc; +use alloc::boxed::Box; +use crate::sev::utils::{RMPFlags,rmp_adjust}; +const PAGE_ENTRY_SIZE: usize = 512; +static ENCRYPT_MASK: ImmutAfterInitCell = ImmutAfterInitCell::new(0); +#[allow(dead_code)] +static MAX_PHYS_ADDR: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); +#[allow(dead_code)] +const CBIT_LOCATION: u32 = 0x8000001f; +#[allow(dead_code)] +const PSIZE_LOCATION: u32 = 0x80000008; +const ADDRESS_BITS: usize = 0x000ffffffffff000; +pub fn set_ecryption_mask_address_size() { + let res = cpuid_table(CBIT_LOCATION).expect("CPUID table query error"); + let c_bit = res.ebx & 0x3f; + let mask = 1u64 << c_bit; + let _ = ENCRYPT_MASK.reinit(&(mask as usize)); + + let res = cpuid_table(PSIZE_LOCATION).expect("CPUID table query error"); + let guest_phys_addr_size = (res.eax >> 16) & 0xff; + let host_phys_addr_size = res.eax & 0xff; + let phys_addr_size = if guest_phys_addr_size == 0 { + host_phys_addr_size + } else { + guest_phys_addr_size + }; + let effective_phys_addr_size = cmp::min(c_bit, phys_addr_size); + let max_addr = 1 << effective_phys_addr_size; + let _ = MAX_PHYS_ADDR.reinit(&max_addr); +} + + +fn get_ecryption_mask() -> usize { + *ENCRYPT_MASK +} + +fn strip_c_bit(paddr: PhysAddr) -> PhysAddr { + PhysAddr::from(paddr.bits() & !get_ecryption_mask()) +} + +fn set_c_bit_in_address(addr: PhysAddr) -> PhysAddr { + return PhysAddr::from(addr.bits() | get_ecryption_mask()); +} + +bitflags! 
{ + #[repr(transparent)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + pub struct PageFlags: u64 { + const PRESENT = 1 << 0; + const WRITABLE = 1 << 1; + const USER_ACCESSIBLE = 1 << 2; + const WRITE_THROUGH = 1 << 3; + const NO_CACHE = 1 << 4; + const ACCESSED = 1 << 5; + const DIRTY = 1 << 6; + const HUGE_PAGE = 1 << 7; + const GLOBAL = 1 << 8; + + const NO_EXECUTE = 1 << 63; + } +} + +impl PageFlags { + pub fn exec() -> Self { + Self::PRESENT | Self::GLOBAL | Self::ACCESSED | Self::DIRTY + } + + pub fn data() -> Self { + Self::PRESENT + | Self::GLOBAL + | Self::WRITABLE + | Self::NO_EXECUTE + | Self::ACCESSED + | Self::DIRTY + } + + pub fn data_ro() -> Self { + Self::PRESENT | Self::GLOBAL | Self::NO_EXECUTE | Self::ACCESSED | Self::DIRTY + } + + pub fn task_exec() -> Self { + Self::PRESENT | Self::ACCESSED | Self::DIRTY + } + + pub fn task_data() -> Self { + Self::PRESENT | Self::WRITABLE | Self::NO_EXECUTE | Self::ACCESSED | Self::DIRTY + } + + pub fn task_data_ro() -> Self { + Self::PRESENT | Self::NO_EXECUTE | Self::ACCESSED | Self::DIRTY + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug, Default)] +pub struct PageTableEntry(PhysAddr); + +impl PageTableEntry { + pub fn flags(&self) -> PageFlags { + return PageFlags::from_bits_truncate(self.0.bits() as u64); + } + pub fn present(&self) -> bool { + return self.flags().contains(PageFlags::PRESENT); + } + pub fn set(&mut self, addr: PhysAddr, flags: PageFlags) { + self.0 = PhysAddr::from(addr.bits() as u64 | flags.bits()); + } + pub fn address(&self) -> PhysAddr { + return strip_c_bit(PhysAddr::from(self.0.bits() & ADDRESS_BITS)); + } +} + +#[repr(C)] +#[derive(Debug)] +pub struct PageTablePage([PageTableEntry; PAGE_ENTRY_SIZE]); + +impl Default for PageTablePage { + fn default() -> Self { + return PageTablePage { + 0: [PageTableEntry::default(); PAGE_ENTRY_SIZE], + }; + } +} + +impl Index for PageTablePage { + type Output = PageTableEntry; + fn index(&self, index: usize) -> &PageTableEntry { + return &self.0[index]; + } +} + +impl IndexMut for PageTablePage { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + return &mut self.0[index]; + } +} +#[repr(C)] +#[derive(Debug)] +pub struct PageTable(PageTablePage); + +impl PageTable { + + pub fn index(addr: VirtAddr) -> usize { + addr.bits() >> (12 + L * 9) & 0x1ff + } + +} + + +#[repr(C)] +#[derive(Debug)] +pub struct PageTableReference { + pub table: *mut PageTable, + pub table_phy: PhysAddr, + pub table_entry: TemporaryPageMapping, + pub mounted: bool, + pub pages: [PhysAddr; 256], + pub free_pages: [bool; 256], +} +#[derive(Clone, Copy, Debug)] +pub enum SchalError { + Allocation, +} + +#[derive(Debug)] +pub enum TableLevelMapping<'a> { + Level0(&'a mut PageTableEntry), + Level1(&'a mut PageTableEntry), + Level2(&'a mut PageTableEntry), + Level3(&'a mut PageTableEntry), +} + + +/* + +*/ +impl PageTableReference { + + pub fn init(&mut self, addr: PhysAddr, mem: &[PhysAddr]){ + for i in self.pages.iter_mut() { + *i = PhysAddr::from(0u64); + } + for i in self.free_pages.iter_mut() { + *i = false; + } + + for i in 0..mem.len() { + self.pages[i] = mem[i]; + self.free_pages[i] = true; + } + self.mounted = false; + self.table_phy = addr; + } + + pub fn mount(&mut self) { + if self.mounted { + return; + } + self.table_entry = TemporaryPageMapping::create_4k(self.table_phy).unwrap(); + self.table = self.table_entry.virt_addr().as_mut_ptr::(); + self.mounted = true; + } + pub fn unmount(&mut self) { + if !self.mounted { + return; + } + self.table_entry.delete(); + 
self.mounted = false; + } + + fn print_flags(addr: PhysAddr, tabs: &str) { + let v = addr.bits() as u64; + + let f = PageFlags::from_bits_truncate(v); + for e in f.iter_names() { + log::info!("{}{} set",tabs,e.0); + } + if v & get_ecryption_mask() as u64 != 0{ + log::info!("{}Encryption set",tabs) + } + + } + + fn dump_next_level(&self, pentry: &PageTableEntry,level: i32, tabs: &str, va: usize) { + if level == 0 { + log::info!("{}Address: {:#x}, Virtual Addres: {:#x}",tabs, pentry.0,va); + PageTableReference::print_flags(pentry.0, &(tabs.to_owned() + " ")); + return; + } + + if pentry.flags().contains(PageFlags::PRESENT){ + let m = TemporaryPageMapping::create_4k_clear12(pentry.0).expect(""); + let table = unsafe{ m.virt_addr().as_mut_ptr::().as_mut().unwrap() }; + for j in 0..table.0.len(){ + if table.0[j].0.bits() != 0 { + log::info!("{}Entry: Index {}, Address: {:#x}",tabs, j,table.0[j].0.bits() as u64 & 0x000f_ffff_ffff_f000u64); + PageTableReference::print_flags(table.0[j].0, &(tabs.to_owned() + " ")); + let tabel_entry = table.0[j]; + self.dump_next_level(&tabel_entry,level-1,&(tabs.to_owned() + " "), va + (j <<((12 + (level-1) * 9)))); + } + } + m.delete(); + } + + + } + + pub fn dump(&self) { + let table = unsafe {& (*self.table).0}; + for i in 0..table.0.len() { + if table.0[i].0.bits() != 0 { + log::info!("Entry: Index {}, Address: {:#x}", i,table.0[i].0.bits() as u64 & 0x000f_ffff_ffff_f000u64); + PageTableReference::print_flags(table.0[i].0, " "); + let tabel_entry = table.0[i]; + self.dump_next_level(&tabel_entry, 3, " ", i <<((12 + (3) * 9))); + } + } + + + } + + + + fn page_walk(table: &mut PageTablePage, addr: VirtAddr) -> (TableLevelMapping<'_>, TemporaryPageMapping) { + let index = PageTable::index::<3>(addr); + let table_entry = table[index]; + log::info!("Checking Entry: {:#x} (index: {})", table_entry.0, index); + + if !table_entry.flags().contains(PageFlags::PRESENT) { + log::info!("Entry {} in level 3 not found", index); + return (TableLevelMapping::Level3(&mut table[index]), Default::default()) + } + + let m3 = TemporaryPageMapping::create_4k_clear12(table_entry.0).expect(""); + + let table = unsafe{ m3.virt_addr().as_mut_ptr::().as_mut().unwrap() }; + + let index = PageTable::index::<2>(addr); + let table_entry = table[index]; + log::info!("Checking Entry: {:#x} (index: {})", table_entry.0, index); + if !table_entry.flags().contains(PageFlags::PRESENT) { + return (TableLevelMapping::Level2(&mut table[index]), m3) + } + + let m2 = TemporaryPageMapping::create_4k_clear12(table_entry.0).expect(""); + m3.delete(); + let table = unsafe{ m2.virt_addr().as_mut_ptr::().as_mut().unwrap() }; + + let index = PageTable::index::<1>(addr); + let table_entry = table[index]; + log::info!("Checking Entry: {:#x} (index: {})", table_entry.0, index); + if !table_entry.flags().contains(PageFlags::PRESENT) { + return (TableLevelMapping::Level1(&mut table[index]), m2) + } + + let m1 = TemporaryPageMapping::create_4k_clear12(table_entry.0).expect(""); + m2.delete(); + let table = unsafe{ m1.virt_addr().as_mut_ptr::().as_mut().unwrap() }; + + let index = PageTable::index::<0>(addr); + return (TableLevelMapping::Level0(&mut table[index]), m1) + + } + + + fn get_free_pages(&mut self) -> PhysAddr { + for i in 0..self.free_pages.len() { + if self.free_pages[i] { + self.free_pages[i] = false; + log::info!("New Page allocated: {:#x}", self.pages[i]); + return self.pages[i]; + } + } + return PhysAddr::from(0u64); + } + + pub fn page_walk_pub(&self, addr: VirtAddr) -> PhysAddr { + 
log::info!("Searching for entry {:#x}", addr.bits()); + let walk = PageTableReference::page_walk(unsafe {&mut (*self.table).0}, addr); + if let TableLevelMapping::Level0(page_entry) = walk.0 { + let ret = page_entry.0; + walk.1.delete(); + ret + } else { + PhysAddr::from(0u64) + } + + } + + pub fn map_4k_page(&mut self, target: VirtAddr, addr: PhysAddr, flags: PageFlags) -> Result<(), SchalError>{ + /* + First comes a walk along the table to find if we have a free slot we can use for the + current page + */ + log::info!("Trying to find empty space in page table"); + let walk = PageTableReference::page_walk(unsafe {&mut (*self.table).0}, target); + let mut current_mapping = walk.0; + let mut current_tmp = walk.1; + let mut finished = false; + log::info!("Allocating if needed"); + //let r = self as *mut PageTableReference; + let table_flages = PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER_ACCESSIBLE | PageFlags::ACCESSED; + while !finished { + match current_mapping { + TableLevelMapping::Level0(_) => { + finished = true; + log::info!("Finished creating new mappings"); + }, + TableLevelMapping::Level1(ref mut table_entry) => { + //let new_self = unsafe {&mut *r}; + log::info!("Created new level0 mapping"); + let page_addr = self.get_free_pages(); + + let new_tmp = TemporaryPageMapping::create_4k(page_addr).unwrap(); + table_entry.set(set_c_bit_in_address(page_addr), table_flages); + current_tmp.delete(); + current_tmp =new_tmp; + + let index = PageTable::index::<0>(target); + let e = unsafe { new_tmp.virt_addr().as_mut_ptr::().as_mut().unwrap() }; + current_mapping = TableLevelMapping::Level0(&mut e[index]); + + + }, + TableLevelMapping::Level2(ref mut table_entry) => { + //let new_self = unsafe {&mut *r}; + log::info!("Created new level1 mapping"); + let page_addr = self.get_free_pages(); + let new_tmp = TemporaryPageMapping::create_4k(page_addr).unwrap(); + table_entry.set(set_c_bit_in_address(page_addr), table_flages); + current_tmp.delete(); + current_tmp = new_tmp; + + let index = PageTable::index::<1>(target); + let e = unsafe { new_tmp.virt_addr().as_mut_ptr::().as_mut().unwrap() }; + current_mapping = TableLevelMapping::Level1(&mut e[index]); + + + }, + TableLevelMapping::Level3(ref mut table_entry) => { + //let new_self = unsafe {&mut *r}; + log::info!("Created new level2 mapping"); + let page_addr = self.get_free_pages(); + let new_tmp = TemporaryPageMapping::create_4k(page_addr).unwrap(); + table_entry.set(set_c_bit_in_address(page_addr), table_flages); + current_tmp.delete(); + current_tmp = new_tmp; + + let index = PageTable::index::<2>(target); + let e = unsafe { new_tmp.virt_addr().as_mut_ptr::().as_mut().unwrap() }; + current_mapping = TableLevelMapping::Level2(&mut e[index]); + + + }, + }; + }; + + if let TableLevelMapping::Level0(page_entry) = current_mapping { + log::info!("Adding address at {:#x}", target); + Ok(page_entry.set(set_c_bit_in_address(addr),flags)) + } else { + Err(SchalError::Allocation) + } + + } + + +} + + + +#[derive(Debug)] +pub struct Pointer { + pub pointer: *mut T +} + + +impl Deref for Pointer { + type Target = T; + + fn deref(&self) -> &Self::Target { + unsafe { &*self.pointer } + } +} + +impl DerefMut for Pointer { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.pointer } + } +} \ No newline at end of file diff --git a/kernel/src/sp_pagetable/tmp_mapping.rs b/kernel/src/sp_pagetable/tmp_mapping.rs new file mode 100644 index 000000000..de39f8667 --- /dev/null +++ b/kernel/src/sp_pagetable/tmp_mapping.rs @@ -0,0 
+1,104 @@ +use crate::cpu::percpu::this_cpu_mut; +use crate::cpu::tlb::flush_address_sync; +use crate::mm::pagetable::PTEntryFlags; +use crate::mm::virtualrange::{ + virt_alloc_range_2m, virt_alloc_range_4k, virt_free_range_2m, virt_free_range_4k, +}; +use crate::address::{Address, PhysAddr, VirtAddr}; +use crate::utils::MemoryRegion; +use crate::error::SvsmError; +use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M}; +#[derive(Debug, Clone, Copy)] +pub struct TemporaryPageMapping { + mapping: MemoryRegion, + phy_add: PhysAddr, +} + +impl Default for TemporaryPageMapping { + fn default() -> Self { + TemporaryPageMapping { + mapping: MemoryRegion::new(VirtAddr::from(0u64),0), + phy_add: PhysAddr::from(0u64), + } + } +} + +impl TemporaryPageMapping { + pub fn create( + paddr_start: PhysAddr, + paddr_end: PhysAddr, + alignment: usize, + ) -> Result { + let align_mask = (PAGE_SIZE << alignment) - 1; + let size = paddr_end - paddr_start; + assert!((size & align_mask) == 0); + assert!((paddr_start.bits() & align_mask) == 0); + assert!((paddr_end.bits() & align_mask) == 0); + + let flags = PTEntryFlags::data(); + let huge = ((paddr_start.bits() & (PAGE_SIZE_2M - 1)) == 0) + && ((paddr_end.bits() & (PAGE_SIZE_2M - 1)) == 0); + let vaddr = if huge { + let vaddr = virt_alloc_range_2m(size, 0)?; + //let reg = MemoryRegion::::new(vaddr, size); + if let Err(e) = + this_cpu_mut() + .get_pgtable() + .map_region_2m(vaddr, paddr_start, flags) + { + virt_free_range_2m(vaddr); + return Err(e); + } + vaddr + } else { + let vaddr = virt_alloc_range_4k(size, 0)?; + //let reg = MemoryRegion::::new(vaddr, size); + if let Err(e) = + this_cpu_mut() + .get_pgtable() + .map_region_4k(vaddr, paddr_start, flags) + { + virt_free_range_4k(vaddr); + return Err(e); + } + vaddr + }; + + //let raw_mapping = MemoryRegion::::new(vaddr, size); + + Ok(TemporaryPageMapping { + mapping: vaddr, + phy_add: paddr_start, + }) + } + + pub fn create_4k(paddr: PhysAddr) -> Result { + Self::create(paddr, paddr + PAGE_SIZE, 0) + } + pub fn create_4k_clear12(paddr: PhysAddr) -> Result { + let paddr = PhysAddr::from(paddr.bits() as u64 & 0x000f_ffff_ffff_f000u64); + Self::create(paddr, paddr + PAGE_SIZE, 0) + } + + pub fn virt_addr(&self) -> VirtAddr { + self.mapping.start() + } + + pub fn remove(&self) { + let start = self.mapping.start(); + let end = self.mapping.end(); + //let size = self.mapping.len(); + + let reg = MemoryRegion::::from_addresses(start, end); + + this_cpu_mut().get_pgtable().unmap_region_4k(reg); + virt_free_range_4k(reg); + flush_address_sync(start); + } + pub fn delete(&self) { + if u64::from(self.phy_add) == 0 { + return; + } + self.remove(); + } +} \ No newline at end of file
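
Usage sketches (illustrative only, not part of the diff above).

Requests with protocol number 10 (SVSM_PROCESS_PROTOCOL) are routed by request_loop_once() to process_protocol_request(), which forwards to monitor_call_handler() and its call numbers (0 init, 1 attest, 4/5 zygote, 6/7 trustlet). The sketch below drives the ATTEST_MONITOR path through that entry point. It assumes a running SVSM context (the report is fetched over the GHCB), the helper name and report page are placeholders, the call numbers are repeated locally because they are private in call_handler.rs, and the paths are written as from a sibling crate (inside the kernel they would start with crate::).

    use svsm::address::PhysAddr;
    use svsm::protocols::errors::SvsmReqError;
    use svsm::protocols::process::process_protocol_request;
    use svsm::protocols::RequestParams;

    // Call numbers mirrored from process_manager/call_handler.rs.
    const MONITOR_INIT: u32 = 0;
    const ATTEST_MONITOR: u32 = 1;

    /// Fetch the monitor attestation report into the page at `report_page`.
    fn fetch_monitor_report(report_page: PhysAddr) -> Result<u64, SvsmReqError> {
        let mut params = RequestParams::default();

        // MONITOR_INIT sets up the process store and the encryption mask.
        process_protocol_request(MONITOR_INIT, &mut params)?;

        // With rdx == 0 the handler only returns the report size in rdx.
        params.rdx = 0;
        process_protocol_request(ATTEST_MONITOR, &mut params)?;
        let report_size = params.rdx;

        // With rdx != 0 the report is copied to the physical page named in rcx.
        params.rcx = u64::from(report_page);
        process_protocol_request(ATTEST_MONITOR, &mut params)?;
        Ok(report_size)
    }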
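
The sp_pagetable walker indexes the four paging levels with addr >> (12 + 9 * L) & 0x1ff (PageTable::index::<L>). Below is a standalone illustration of that arithmetic for the two pages create_trusted_process() maps at 0x8000000000 (code, PageFlags::exec()) and the following 4 KiB page (stack, PageFlags::data()); plain Rust, no kernel dependencies, names are local to the example.

    // Standalone version of PageTable::index::<L>() from kernel/src/sp_pagetable/mod.rs.
    fn index(addr: u64, level: u32) -> u64 {
        (addr >> (12 + level * 9)) & 0x1ff
    }

    fn main() {
        let code = 0x80_0000_0000u64; // trustlet entry point
        let stack = code + 0x1000;    // next 4 KiB page

        // 0x8000000000 == 1 << 39, so only the top-level (level 3) index is non-zero.
        assert_eq!(
            [index(code, 3), index(code, 2), index(code, 1), index(code, 0)],
            [1, 0, 0, 0]
        );
        // The stack page shares all upper-level tables and differs only at level 0.
        assert_eq!(index(stack, 0), 1);
        println!("both pages sit under top-level slot 1");
    }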
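
tmp_mapping.rs follows a map/use/unmap pattern: create_4k() maps a single physical page into the SVSM address space, virt_addr() exposes the mapping, and delete() tears it down again (it is a no-op for the zeroed default). A minimal sketch of that pattern, assuming execution inside the kernel where a per-CPU page table is active; the helper and its caller-supplied buffer are hypothetical.

    use svsm::address::PhysAddr;
    use svsm::error::SvsmError;
    use svsm::sp_pagetable::tmp_mapping::TemporaryPageMapping;
    use svsm::types::PAGE_SIZE;

    /// Copy one physical page into `out` through a short-lived mapping.
    fn copy_page(paddr: PhysAddr, out: &mut [u8; PAGE_SIZE]) -> Result<(), SvsmError> {
        let mapping = TemporaryPageMapping::create_4k(paddr)?;
        // The pointer is only valid while the mapping exists.
        let src = unsafe {
            mapping.virt_addr().as_mut_ptr::<[u8; PAGE_SIZE]>().as_ref().unwrap()
        };
        out.copy_from_slice(src);
        // There is no Drop impl, so the mapping must be released explicitly.
        mapping.delete();
        Ok(())
    }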