From cd03ec94a06a704ccc78b4a7d3333dc901efca1f Mon Sep 17 00:00:00 2001 From: ClawSeven Date: Fri, 9 Jun 2023 19:10:43 +0800 Subject: [PATCH] Implement enclave memory management --- sgx_trts/Cargo.toml | 4 + sgx_trts/src/arch.rs | 16 +- sgx_trts/src/call/ocall.rs | 8 +- sgx_trts/src/edmm/epc.rs | 15 +- sgx_trts/src/edmm/mem.rs | 12 +- sgx_trts/src/edmm/mod.rs | 2 +- sgx_trts/src/edmm/perm.rs | 70 ++++++ sgx_trts/src/edmm/tcs.rs | 6 +- sgx_trts/src/emm/alloc.rs | 31 +++ sgx_trts/src/emm/bitmap.rs | 160 ++++++++++++ sgx_trts/src/emm/ema.rs | 454 +++++++++++++++++++++++++++++++++++ sgx_trts/src/emm/emalist.rs | 31 +++ sgx_trts/src/emm/flags.rs | 82 +++++++ sgx_trts/src/emm/interior.rs | 242 +++++++++++++++++++ sgx_trts/src/emm/mod.rs | 24 ++ sgx_trts/src/emm/user.rs | 96 ++++++++ sgx_trts/src/lib.rs | 3 + 17 files changed, 1240 insertions(+), 16 deletions(-) create mode 100644 sgx_trts/src/emm/alloc.rs create mode 100644 sgx_trts/src/emm/bitmap.rs create mode 100644 sgx_trts/src/emm/ema.rs create mode 100644 sgx_trts/src/emm/emalist.rs create mode 100644 sgx_trts/src/emm/flags.rs create mode 100644 sgx_trts/src/emm/interior.rs create mode 100644 sgx_trts/src/emm/mod.rs create mode 100644 sgx_trts/src/emm/user.rs diff --git a/sgx_trts/Cargo.toml b/sgx_trts/Cargo.toml index 82282e683..d1e2cb4e2 100644 --- a/sgx_trts/Cargo.toml +++ b/sgx_trts/Cargo.toml @@ -39,3 +39,7 @@ hyper = ["sgx_types/hyper"] sgx_types = { path = "../sgx_types" } sgx_crypto_sys = { path = "../sgx_crypto/sgx_crypto_sys" } sgx_tlibc_sys = { path = "../sgx_libc/sgx_tlibc_sys" } +intrusive-collections = "0.9.5" +buddy_system_allocator = "0.9.0" +spin = "0.9.4" +bitflags = "1.3" diff --git a/sgx_trts/src/arch.rs b/sgx_trts/src/arch.rs index c1d695a53..a6cb23e3c 100644 --- a/sgx_trts/src/arch.rs +++ b/sgx_trts/src/arch.rs @@ -37,12 +37,24 @@ macro_rules! is_page_aligned { }; } +macro_rules! round_to { + ($num:expr, $align:expr) => { + ($num + $align - 1) & (!($align - 1)) + }; +} + macro_rules! 
round_to_page { ($num:expr) => { ($num + crate::arch::SE_PAGE_SIZE - 1) & (!(crate::arch::SE_PAGE_SIZE - 1)) }; } +macro_rules! trim_to { + ($num:expr, $align:expr) => { + $num & (!($align - 1)) + }; +} + macro_rules! trim_to_page { ($num:expr) => { $num & (!(crate::arch::SE_PAGE_SIZE - 1)) @@ -670,8 +682,8 @@ impl From for SecinfoFlags { impl From for SecinfoFlags { fn from(data: PageInfo) -> SecinfoFlags { let typ = data.typ as u64; - let flags = data.flags.bits() as u64; - SecinfoFlags::from_bits_truncate((typ << 8) | flags) + let prot = data.prot.bits() as u64; + SecinfoFlags::from_bits_truncate((typ << 8) | prot) } } diff --git a/sgx_trts/src/call/ocall.rs b/sgx_trts/src/call/ocall.rs index ea1af72f7..bff4d6c51 100644 --- a/sgx_trts/src/call/ocall.rs +++ b/sgx_trts/src/call/ocall.rs @@ -34,11 +34,13 @@ pub enum OCallIndex { TrimCommit, Modpr, Mprotect, + Alloc, + Modify, } impl OCallIndex { pub fn is_builtin_index(index: i32) -> bool { - (-5..=-2).contains(&index) + (-7..=-2).contains(&index) } pub fn is_builtin(&self) -> bool { @@ -62,6 +64,8 @@ impl TryFrom for OCallIndex { -3 => Ok(OCallIndex::TrimCommit), -4 => Ok(OCallIndex::Modpr), -5 => Ok(OCallIndex::Mprotect), + -6 => Ok(OCallIndex::Alloc), + -7 => Ok(OCallIndex::Modify), _ => Err(u8::try_from(256_u16).unwrap_err()), } } @@ -76,6 +80,8 @@ impl From for i32 { OCallIndex::TrimCommit => -3, OCallIndex::Modpr => -4, OCallIndex::Mprotect => -5, + OCallIndex::Alloc => -6, + OCallIndex::Modify => -7, } } } diff --git a/sgx_trts/src/edmm/epc.rs b/sgx_trts/src/edmm/epc.rs index 446ecec8d..f204a3276 100644 --- a/sgx_trts/src/edmm/epc.rs +++ b/sgx_trts/src/edmm/epc.rs @@ -34,10 +34,13 @@ impl_enum! { } } +// ProtFlags may have richer meaning compared to ProtFlags +// ProtFlags and AllocFlags are confused to developer +// PageInfo->flags should change to PageInfo->prot impl_bitflags! 
{ #[repr(C)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] - pub struct PageFlags: u8 { + pub struct ProtFlags: u8 { const NONE = 0x00; const R = 0x01; const W = 0x02; @@ -51,7 +54,13 @@ impl_bitflags! { #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct PageInfo { pub typ: PageType, - pub flags: PageFlags, + pub prot: ProtFlags, +} + +impl Into for PageInfo { + fn into(self) -> u32 { + (Into::::into(self.typ) as u32) << 8 | (self.prot.bits() as u32) + } } unsafe impl ContiguousMemory for PageInfo {} @@ -106,7 +115,7 @@ impl PageRange { pub(crate) fn modify(&self) -> SgxResult { for page in self.iter() { let _ = page.modpe(); - if !page.info.flags.contains(PageFlags::W | PageFlags::X) { + if !page.info.prot.contains(ProtFlags::W | ProtFlags::X) { page.accept()?; } } diff --git a/sgx_trts/src/edmm/mem.rs b/sgx_trts/src/edmm/mem.rs index 0d6ac634d..18b1581d5 100644 --- a/sgx_trts/src/edmm/mem.rs +++ b/sgx_trts/src/edmm/mem.rs @@ -26,7 +26,7 @@ cfg_if! { #[cfg(not(any(feature = "sim", feature = "hyper")))] mod hw { use crate::arch::{self, Layout}; - use crate::edmm::epc::{PageFlags, PageInfo, PageRange, PageType}; + use crate::edmm::epc::{PageInfo, PageRange, PageType, ProtFlags}; use crate::edmm::layout::LayoutTable; use crate::edmm::perm; use crate::edmm::trim; @@ -47,7 +47,7 @@ mod hw { count, PageInfo { typ: PageType::Reg, - flags: PageFlags::R | PageFlags::W | PageFlags::PENDING, + prot: ProtFlags::R | ProtFlags::W | ProtFlags::PENDING, }, )?; if (attr.attr & arch::PAGE_DIR_GROW_DOWN) == 0 { @@ -74,7 +74,7 @@ mod hw { count, PageInfo { typ: PageType::Trim, - flags: PageFlags::MODIFIED, + prot: ProtFlags::MODIFIED, }, )?; pages.accept_forward()?; @@ -96,7 +96,7 @@ mod hw { count, PageInfo { typ: PageType::Reg, - flags: PageFlags::R | PageFlags::W | PageFlags::PENDING, + prot: ProtFlags::R | ProtFlags::W | ProtFlags::PENDING, }, )?; pages.accept_forward()?; @@ -131,7 +131,7 @@ mod hw { count, PageInfo { typ: PageType::Trim, - flags: 
PageFlags::MODIFIED, + prot: ProtFlags::MODIFIED, }, )?; pages.accept_forward()?; @@ -196,7 +196,7 @@ mod hw { count, PageInfo { typ: PageType::Reg, - flags: PageFlags::PR | PageFlags::from_bits_truncate(perm), + prot: ProtFlags::PR | ProtFlags::from_bits_truncate(perm), }, )?; diff --git a/sgx_trts/src/edmm/mod.rs b/sgx_trts/src/edmm/mod.rs index 420dbccb4..c54b036b0 100644 --- a/sgx_trts/src/edmm/mod.rs +++ b/sgx_trts/src/edmm/mod.rs @@ -24,6 +24,6 @@ pub(crate) mod tcs; #[cfg(not(any(feature = "sim", feature = "hyper")))] pub(crate) mod trim; -pub use epc::{PageFlags, PageInfo, PageRange, PageType}; +pub use epc::{PageInfo, PageRange, PageType, ProtFlags}; pub use mem::{apply_epc_pages, trim_epc_pages}; pub use perm::{modpr_ocall, mprotect_ocall}; diff --git a/sgx_trts/src/edmm/perm.rs b/sgx_trts/src/edmm/perm.rs index 4e17e67e5..31368064b 100644 --- a/sgx_trts/src/edmm/perm.rs +++ b/sgx_trts/src/edmm/perm.rs @@ -27,6 +27,8 @@ cfg_if! { mod hw { use crate::arch::SE_PAGE_SHIFT; use crate::call::{ocall, OCallIndex, OcAlloc}; + use crate::edmm::{PageInfo, PageType}; + use crate::emm::flags::AllocFlags; use alloc::boxed::Box; use core::convert::Into; use sgx_types::error::{SgxResult, SgxStatus}; @@ -67,6 +69,74 @@ mod hw { ocall(OCallIndex::Mprotect, Some(change.as_mut())) } + + // In keeping with Intel SDK, here we use the name page_properties, + // but page_type: PageType is more appropriate + #[repr(C)] + #[derive(Clone, Copy, Debug, Default)] + struct EmmAllocOcall { + retval: i32, + addr: usize, + size: usize, + page_properties: u32, + alloc_flags: u32, + } + + /// FIXME: fake alloc + pub fn alloc_ocall( + addr: usize, + length: usize, + page_type: PageType, + alloc_flags: AllocFlags, + ) -> SgxResult { + let mut change = Box::try_new_in( + EmmAllocOcall { + retval: 0, // not sure + addr, + size: length, + page_properties: Into::::into(page_type) as u32, + alloc_flags: alloc_flags.bits(), + }, + OcAlloc, + ) + .map_err(|_| SgxStatus::OutOfMemory)?; + + 
ocall(OCallIndex::Alloc, Some(change.as_mut())) + } + + // In keeping with Intel SDK, here we use the name flags_from (si_flags), + // but we rename si_flags to page_info, here info_from: PageInfo is more appropriate + #[repr(C)] + #[derive(Clone, Copy, Debug, Default)] + struct EmmModifyOcall { + retval: i32, + addr: usize, + size: usize, + flags_from: u32, + flags_to: u32, + } + + /// FIXME: fake modify + pub fn modify_ocall( + addr: usize, + length: usize, + info_from: PageInfo, + info_to: PageInfo, + ) -> SgxResult { + let mut change = Box::try_new_in( + EmmModifyOcall { + retval: 0, + addr, + size: length, + flags_from: Into::::into(info_from), + flags_to: Into::::into(info_to), + }, + OcAlloc, + ) + .map_err(|_| SgxStatus::OutOfMemory)?; + + ocall(OCallIndex::Modify, Some(change.as_mut())) + } } #[cfg(any(feature = "sim", feature = "hyper"))] diff --git a/sgx_trts/src/edmm/tcs.rs b/sgx_trts/src/edmm/tcs.rs index 271b94e24..e92d77be3 100644 --- a/sgx_trts/src/edmm/tcs.rs +++ b/sgx_trts/src/edmm/tcs.rs @@ -63,7 +63,7 @@ pub fn mktcs(mk_tcs: NonNull) -> SgxResult { #[cfg(not(any(feature = "sim", feature = "hyper")))] mod hw { use crate::arch::{self, Layout, Tcs}; - use crate::edmm::epc::{Page, PageFlags, PageInfo, PageType}; + use crate::edmm::epc::{Page, PageInfo, PageType, ProtFlags}; use crate::enclave::MmLayout; use crate::tcs::list; use core::ptr; @@ -123,7 +123,7 @@ mod hw { tcs.as_ptr() as usize, PageInfo { typ: PageType::Tcs, - flags: PageFlags::MODIFIED, + prot: ProtFlags::MODIFIED, }, )?; page.accept()?; @@ -175,7 +175,7 @@ mod hw { tcs.as_ptr() as usize, PageInfo { typ: PageType::Trim, - flags: PageFlags::MODIFIED, + prot: ProtFlags::MODIFIED, }, )?; page.accept()?; diff --git a/sgx_trts/src/emm/alloc.rs b/sgx_trts/src/emm/alloc.rs new file mode 100644 index 000000000..30e1d7008 --- /dev/null +++ b/sgx_trts/src/emm/alloc.rs @@ -0,0 +1,31 @@ +use core::alloc::{AllocError, Allocator, Layout}; +use core::ptr::NonNull; + +/// alloc layout memory from 
Reserve region +#[derive(Clone)] +pub struct ResAlloc; + +unsafe impl Allocator for ResAlloc { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + todo!() + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + todo!() + } +} + +#[derive(Clone)] +pub struct StaticAlloc; + +unsafe impl Allocator for StaticAlloc { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + todo!() + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + todo!() + } +} diff --git a/sgx_trts/src/emm/bitmap.rs b/sgx_trts/src/emm/bitmap.rs new file mode 100644 index 000000000..5a7e1e370 --- /dev/null +++ b/sgx_trts/src/emm/bitmap.rs @@ -0,0 +1,160 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. +use alloc::boxed::Box; +use alloc::vec; +use alloc::vec::Vec; +use core::alloc::Allocator; +use core::clone::Clone; +use sgx_types::error::SgxResult; +use sgx_types::error::SgxStatus; + +// box 能否 #[repr(C)] +#[derive(Clone)] +pub struct BitArray { + pub bits: usize, + pub bytes: usize, + pub data: Box<[u8], A>, // temporariy use ResAlloc + alloc: A, +} + +impl BitArray { + /// Init BitArray in Reserve memory with all zeros. 
+ pub fn new_in(bits: usize, alloc: A) -> SgxResult { + let bytes = (bits + 7) / 8; + + // FIXME: return error if out of memory + let data: Box<[u8], A> = vec::from_elem_in(0_u8, bytes, alloc.clone()).into_boxed_slice(); + Ok(Self { + bits, + bytes, + data, + alloc, + }) + } + + // Get the value of the bit at a given index. + // todo: return SgxResult + pub fn get(&self, index: usize) -> bool { + let byte_index = index / 8; + let bit_index = index % 8; + let bit_mask = 1 << bit_index; + (self.data.get(byte_index).unwrap() & bit_mask) != 0 + } + + // Set the value of the bit at a given index. + pub fn set(&mut self, index: usize, value: bool) { + let byte_index = index / 8; + let bit_index = index % 8; + let bit_mask = 1 << bit_index; + + let data = self.data.as_mut(); + if value { + data[byte_index] |= bit_mask; + } else { + data[byte_index] &= !bit_mask; + } + } + + // return chunk range with all true, Vec<[start, end)> + pub fn true_range(&self) -> Vec<(usize, usize), A> { + let mut true_range: Vec<(usize, usize), A> = Vec::new_in(self.alloc.clone()); + + let start: usize = 0; + let end: usize = self.bits; + + // TODO: optimized with [u8] slice + while start < end { + let mut block_start = start; + while block_start < end { + if self.get(block_start) { + break; + } else { + block_start += 1; + } + } + + if block_start == end { + break; + } + + let mut block_end = block_start + 1; + while block_end < end { + if self.get(block_end) { + block_end += 1; + } else { + break; + } + } + true_range.push((start,end)); + } + + return true_range; + } + + /// Set the value of the bit at a given index. + /// The range includes [0, index). + pub fn set_until(&mut self, index: usize, value: bool) { + todo!() + } + + /// Set the value of the bit at a given index. + /// The range includes [0, index). 
+ pub fn set_full(&mut self) { + self.data.fill(0xFF); + } + + /// Clear all the bits + pub fn clear(&mut self) { + self.data.fill(0); + } + + // split current bit array into left and right bit array + // return right bit array + pub fn split(&mut self, pos: usize) -> SgxResult> { + ensure!(pos > 0 && pos < self.bits, SgxStatus::InvalidParameter); + + let byte_index = pos / 8; + let bit_index = pos % 8; + + // let l_bits = (byte_index << 3) + bit_index; + let l_bits = pos; + let l_bytes = (l_bits + 7) / 8; + + let r_bits = self.bits - l_bits; + let r_bytes = (r_bits + 7) / 8; + + let mut r_array = Self::new_in(r_bits, self.alloc.clone())?; + + for (idx, item) in r_array.data[..(r_bytes - 1)].iter_mut().enumerate() { + // current byte index in previous bit_array + let curr_idx = idx + byte_index; + let low_bits = self.data[curr_idx] >> bit_index; + let high_bits = self.data[curr_idx + 1] << (8 - bit_index); + *item = high_bits | low_bits; + } + r_array.data[r_bytes - 1] = self.data[self.bytes - 1] >> bit_index; + + self.bits = l_bits; + self.bytes = l_bytes; + + return Ok(r_array); + } +} + + + +// FIXME: add more unit test \ No newline at end of file diff --git a/sgx_trts/src/emm/ema.rs b/sgx_trts/src/emm/ema.rs new file mode 100644 index 000000000..cee7e1516 --- /dev/null +++ b/sgx_trts/src/emm/ema.rs @@ -0,0 +1,454 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use core::alloc::Allocator; + +use crate::arch::Secinfo; +use crate::arch::SecinfoFlags; +use crate::edmm::perm; +use crate::edmm::PageRange; +use crate::edmm::{PageInfo, PageType, ProtFlags}; +use crate::enclave::is_within_enclave; +use alloc::boxed::Box; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::LinkedListLink; +use sgx_types::error::SgxResult; +use sgx_types::error::SgxStatus; + +use crate::feature::SysFeatures; +use crate::trts::Version; +use crate::veh::{ExceptionHandler, ExceptionInfo}; + +use super::alloc::ResAlloc; +use super::bitmap::BitArray; +use super::flags::AllocFlags; + +// pub struct Box(_, _) +// where +// A: Allocator, +// T: ?Sized; + +#[repr(C)] +#[derive(Clone)] +pub struct EMA +where + A: Allocator + Clone, +{ + // starting address, page aligned + start: usize, + // bytes, or page may be more available + length: usize, + alloc_flags: AllocFlags, + info: PageInfo, + // bitmap for EACCEPT status + eaccept_map: Option>, + // custom PF handler (for EACCEPTCOPY use) + handler: Option, + // private data for handler + priv_data: Option<*mut ExceptionInfo>, + alloc: A, + // intrusive linkedlist + link: LinkedListLink, +} + +impl EMA +where + A: Allocator + Clone, +{ + // start address must be page aligned + pub fn new( + start: usize, + length: usize, + alloc_flags: AllocFlags, + info: PageInfo, + handler: Option, + priv_data: Option<*mut ExceptionInfo>, + alloc: A, + ) -> SgxResult { + // check flags' eligibility + AllocFlags::try_from(alloc_flags.bits())?; + if start != 0 + && length != 0 + && 
is_within_enclave(start as *const u8, length) + && is_page_aligned!(start) + && (length % crate::arch::SE_PAGE_SIZE) == 0 + { + return Ok(Self { + start, + length, + alloc_flags, + info, + eaccept_map: None, + handler, + priv_data, + link: LinkedListLink::new(), + alloc, + }); + } else { + return Err(SgxStatus::InvalidParameter); + } + } + + // Returns a newly allocated ema in charging of the memory in the range [addr, len). + // After the call, the original ema will be left containing the elements [0, addr) + // with its previous capacity unchanged. + pub fn split(&mut self, addr: usize) -> SgxResult,A>> { + let l_start = self.start; + let l_length = addr - l_start; + + let r_start = addr; + let r_length = (self.start + self.length) - addr; + + let new_bitarray = match &mut self.eaccept_map{ + Some(bitarray) => { + let pos = (addr - self.start) >> crate::arch::SE_PAGE_SHIFT; + // split self.eaccept_map + Some(bitarray.split(pos)?) + } + None => { + None + } + }; + + // 这里之后可以优化 + // 1. self.clone() 会把原有的bitmap重新alloc并复制一份,但其实clone之后这里是None即可 + // 2. 使用Box::new_in 会把 self.clone() 这部分在栈上的数据再拷贝一份到Box新申请的内存区域 + let mut new_ema: Box,A> = Box::new_in( + self.clone(), + self.alloc.clone() + ); + + self.start = l_start; + self.length = l_length; + + new_ema.start = r_start; + new_ema.length = r_length; + new_ema.eaccept_map = new_bitarray; + + return Ok(new_ema); + } + + // If the previous ema is divided into three parts -> (left ema, middle ema, right ema), return (middle ema, right ema). 
+ // If the previous ema is divided into two parts -> (left ema, right ema) + // end split: return (None, right ema), start split: return (left ema, None) + fn split_into_three(&mut self, start: usize, length: usize) -> SgxResult<(Option,A>>, Option,A>>)> { + if start > self.start { + let mut new_ema = self.split(start)?; + if new_ema.start + new_ema.length > start + length { + let r_ema = new_ema.split(start + length)?; + return Ok((Some(new_ema), Some(r_ema))); + } else { + return Ok((Some(new_ema), None)); + } + } else { + if self.start + self.length > start + length { + let new_ema = self.split(start + length)?; + return Ok((None, Some(new_ema))); + } else { + return Ok((None, None)); + } + } + } + + // 这里存在一个问题,如果是reserve ema node, 没有eaccept map怎么办 + /// Alloc the reserve / committed / vitual memory indeed + pub fn alloc(&mut self) -> SgxResult { + if self.alloc_flags.contains(AllocFlags::RESERVED) { + return Ok(()); + } + + // COMMIT_ON_DEMAND and COMMIT_NOW both need to mmap memory in urts + perm::alloc_ocall(self.start, self.length, self.info.typ, self.alloc_flags)?; + + if self.alloc_flags.contains(AllocFlags::COMMIT_NOW) { + let grow_up: bool = if self.alloc_flags.contains(AllocFlags::GROWSDOWN) { + false + } else { + true + }; + self.eaccept(self.start, self.length, grow_up)?; + // set eaccept map full + match &mut self.eaccept_map { + Some(map) => { + map.set_full(); + } + None => { + // COMMIT_NOW must have eaccept_map + return Err(SgxStatus::Unexpected); + } + } + } else { + // clear eaccept map + match &mut self.eaccept_map { + Some(map) => { + map.clear(); + } + None => { + // COMMIT_NOW must have eaccept_map + return Err(SgxStatus::Unexpected); + } + } + } + return Ok(()); + } + + /// do eaccept for targeted EPC page + /// similiar to "apply_epc_pages(addr: usize, count: usize)" / intel emm do_commit() + /// do not change eaccept map + fn eaccept(&self, start: usize, length: usize, grow_up: bool) -> SgxResult { + let info = PageInfo { + typ: 
self.info.typ, + prot: self.info.prot | ProtFlags::PENDING, + }; + + let pages = PageRange::new(start, length / crate::arch::SE_PAGE_SIZE, info)?; + + if grow_up { + pages.accept_backward() + } else { + pages.accept_forward() + } + } + + /// ema_do_commit + pub fn commit(&mut self, start: usize, length: usize) -> SgxResult { + ensure!( + length != 0 + && (length % crate::arch::SE_PAGE_SIZE) == 0 + && start >= self.start + && start + length <= self.start + self.length, + SgxStatus::InvalidParameter + ); + + let info = PageInfo { + typ: PageType::Reg, + prot: ProtFlags::R | ProtFlags::W | ProtFlags::PENDING, + }; + + let pages = PageRange::new(start, length / crate::arch::SE_PAGE_SIZE, info)?; + + // page index for parsing start address + let init_idx = (start - self.start) >> crate::arch::SE_PAGE_SHIFT; + let map = self.eaccept_map.as_mut().unwrap(); + + for (idx, page) in pages.iter().enumerate() { + let page_idx = idx + init_idx; + if map.get(page_idx) { + continue; + } else { + page.accept()?; + map.set(page_idx, true); + } + } + return Ok(()); + } + + /// uncommit EPC page + pub fn uncommit(&mut self, start: usize, length: usize, prot: ProtFlags) -> SgxResult { + // need READ for trimming + ensure!(self.info.prot != ProtFlags::NONE && self.eaccept_map.is_some(), + SgxStatus::InvalidParameter); + + if self.alloc_flags.contains(AllocFlags::RESERVED) { + return Ok(()); + } + + let trim_info = PageInfo { + typ: PageType::Trim, + prot: ProtFlags::MODIFIED, + }; + + let map = self.eaccept_map.as_mut().unwrap(); + let mut start = start; + let end: usize = start + length; + + // TODO: optimized with [u8] slice + while start < end { + let mut block_start = start; + while block_start < end { + let pos = (block_start - self.start) >> crate::arch::SE_PAGE_SHIFT; + if map.get(pos) { + break; + } else { + block_start += crate::arch::SE_PAGE_SIZE; + } + } + + if block_start == end { + break; + } + + let mut block_end = block_start + crate::arch::SE_PAGE_SIZE; + while block_end 
< end { + let pos = (block_end - self.start) >> crate::arch::SE_PAGE_SHIFT; + if map.get(pos) { + block_end += crate::arch::SE_PAGE_SIZE; + } else { + break; + } + } + + let block_length = block_end - block_start; + perm::modify_ocall(block_start, block_length, + PageInfo { + typ: self.info.typ, + prot, + }, + PageInfo { + typ: PageType::Trim, + prot, + }, + )?; + + let pages = PageRange::new( + block_start, + block_length / crate::arch::SE_PAGE_SIZE, + trim_info + )?; + + let init_idx = (block_start - self.start) >> crate::arch::SE_PAGE_SHIFT; + for (idx, page) in pages.iter().enumerate() { + page.accept()?; + let pos = idx + init_idx; + map.set(pos, false); + } + + // eaccept trim notify + perm::modify_ocall(block_start, block_length, + PageInfo { + typ: PageType::Trim, + prot, + }, + PageInfo { + typ: PageType::Trim, + prot, + }, + )?; + start = block_end; + } + Ok(()) + } + + pub fn modify_perm(&mut self, new_prot: ProtFlags) -> SgxResult { + if self.info.prot == new_prot { + return Ok(()); + } + + if SysFeatures::get().version() == Version::Sdk2_0 { + perm::modify_ocall( + self.start, + self.length, + self.info, + PageInfo { + typ: self.info.typ, + prot: new_prot, + }, + )?; + } + + let info = PageInfo { + typ: PageType::Reg, + prot: new_prot | ProtFlags::PR, + }; + + let pages = PageRange::new(self.start, self.length / crate::arch::SE_PAGE_SIZE, info)?; + + for page in pages.iter() { + // If new_prot is the subset of self.info.prot, no need to apply modpe. 
+ // So we can't use new_prot != self.info.prot as determination + if (new_prot | self.info.prot) != self.info.prot { + page.modpe()?; + } + + // new permission is RWX, no EMODPR needed in untrusted part, hence no + // EACCEPT + if (new_prot & (ProtFlags::W | ProtFlags::X)) != (ProtFlags::W | ProtFlags::X) { + page.accept()?; + } + } + + self.info = PageInfo { + typ: self.info.typ, + prot: new_prot, + }; + + if new_prot == ProtFlags::NONE && SysFeatures::get().version() == Version::Sdk2_0 { + perm::modify_ocall( + self.start, + self.length, + PageInfo { + typ: self.info.typ, + prot: ProtFlags::NONE, + }, + PageInfo { + typ: self.info.typ, + prot: ProtFlags::NONE, + }, + )?; + } + + Ok(()) + } + + pub fn dealloc(&mut self) -> SgxResult { + if self.alloc_flags.contains(AllocFlags::RESERVED) { + return Ok(()); + } + + if self.info.prot == ProtFlags::NONE { + self.modify_perm(ProtFlags::R)?; + } + self.uncommit(self.start, self.length, ProtFlags::NONE)?; + Ok(()) + } + + pub fn aligned_end(&self, align: usize) -> usize { + let curr_end = self.start + self.length; + round_to!(curr_end, align) + } + + pub fn start(&self) -> usize { + self.start + } + + // get and set attributes + pub fn set_flags(flags: AllocFlags) -> SgxResult<()> { + todo!() + } + pub fn set_prot(info: PageInfo) -> SgxResult<()> { + todo!() + } + fn flags() -> AllocFlags { + todo!() + } + fn info(&self) -> PageInfo { + self.info + } + fn handler(&self) -> Option { + self.handler + } +} + +// +// intrusive_adapter!(pub RegEmaAda = Box, ResAlloc>: EMA { link: LinkedListLink }); + +// regular ema adapter +intrusive_adapter!(pub RegEmaAda = Box>: EMA { link: LinkedListLink }); + +// reserve ema adapter +intrusive_adapter!(pub ResEmaAda = Box>: EMA { link: LinkedListLink }); + diff --git a/sgx_trts/src/emm/emalist.rs b/sgx_trts/src/emm/emalist.rs new file mode 100644 index 000000000..de026ac83 --- /dev/null +++ b/sgx_trts/src/emm/emalist.rs @@ -0,0 +1,31 @@ +// emas: LinkedList, + + + + +// 其实ema 
list倒也不需要,我们可以有个init的user range +// pub struct EmaList { +// // intrusive linked list of reserve ema node for EMM +// emm: LinkedList, +// // intrusive linked list of regular ema node for User +// user: LinkedList, +// } + +// pub enum EmaType { +// Emm, +// User, +// } + +// impl EmaList { + +// pub fn new() -> Self { +// Self { +// emm: LinkedList::new(ResEmaAda::new()), +// user: LinkedList::new(RegEmaAda::new()), +// } +// } + +// pub fn emm_insert(&mut self, ema: Box, ResAlloc>) { + +// } +// } \ No newline at end of file diff --git a/sgx_trts/src/emm/flags.rs b/sgx_trts/src/emm/flags.rs new file mode 100644 index 000000000..c116667ca --- /dev/null +++ b/sgx_trts/src/emm/flags.rs @@ -0,0 +1,82 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +// 感觉这里可以优化一下的,因为EMA_RESERVED,EMA_COMMIT_NOW,EMA_COMMIT_ON_DEMAND其实是个enum。 +// 可以or | EMA_SYSTEM EMA_GROWSDOWN EMA_GROWSUP 组成的enum。 +use bitflags::bitflags; +use sgx_types::error::{SgxResult, SgxStatus}; + +bitflags! 
{ + // 用bitflags的话,在ema输入的时候可能存在RESERVED & COMMIT_NOW 需要check一下 + pub struct AllocFlags: u32 { + const RESERVED = 0b0000_0001; + const COMMIT_NOW = 0b0000_0010; + const COMMIT_ON_DEMAND = 0b0000_0100; + const SYSTEM = 0b0001_0000; + const GROWSDOWN = 0x0010_0000; + const GROWSUP = 0x0100_0000; + } +} + +impl AllocFlags { + pub fn try_from(value: u32) -> SgxResult { + match value { + 0b0001_0001 => Ok(Self::RESERVED | Self::SYSTEM), + 0b0010_0001 => Ok(Self::RESERVED | Self::GROWSDOWN), + 0b0100_0001 => Ok(Self::RESERVED | Self::COMMIT_ON_DEMAND), + 0b0001_0010 => Ok(Self::COMMIT_NOW | Self::SYSTEM), + 0b0010_0010 => Ok(Self::COMMIT_NOW | Self::GROWSDOWN), + 0b0100_0010 => Ok(Self::COMMIT_NOW | Self::COMMIT_ON_DEMAND), + 0b0001_0100 => Ok(Self::COMMIT_ON_DEMAND | Self::SYSTEM), + 0b0010_0100 => Ok(Self::COMMIT_ON_DEMAND | Self::GROWSDOWN), + 0b0100_0100 => Ok(Self::COMMIT_ON_DEMAND | Self::COMMIT_ON_DEMAND), + _ => Err(SgxStatus::InvalidParameter), + } + } +} + +// bitflags! { +// #[derive(Default)] +// pub struct SiFlags: u32 { +// const NONE = 0; +// const READ = 1 << 0; +// const WRITE = 1 << 1; +// const EXEC = 1 << 2; +// const READ_WRITE = Self::READ.bits | Self::WRITE.bits; +// const READ_EXEC = Self::READ.bits | Self::EXEC.bits; +// const READ_WRITE_EXEC = Self::READ.bits | Self::WRITE.bits | Self::EXEC.bits; +// } +// } + +// bitflags! { +// #[derive(Default)] +// pub struct PageType: u32 { +// const NONE = 0; +// const REG = 1 << 0; +// const TCS = 1 << 1; +// const TRIM = 1 << 2; +// // 相比于sdk,少了一个va +// } +// } + +// #[derive(Clone)] +// // Memory protection info +// #[repr(C)] +// pub struct ProtInfo { +// pub si_flags: SiFlags, +// pub page_type: PageType, +// } diff --git a/sgx_trts/src/emm/interior.rs b/sgx_trts/src/emm/interior.rs new file mode 100644 index 000000000..daca9dd47 --- /dev/null +++ b/sgx_trts/src/emm/interior.rs @@ -0,0 +1,242 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use buddy_system_allocator::LockedHeap; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::{LinkedList, LinkedListLink}; + +use alloc::boxed::Box; +use core::alloc::Layout; +use core::ffi::c_void; +use core::mem::transmute; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use spin::{Mutex, Once}; + +use sgx_types::error::{SgxResult, SgxStatus}; +use sgx_types::types::ProtectPerm; + +use crate::emm::ema::EMA; +use crate::emm::user::{USER_RANGE, self, is_within_user_range}; +use crate::enclave::is_within_enclave; + +use super::ema::ResEmaAda; + +const STATIC_MEM_SIZE: usize = 65536; + +/// first level: static memory +static STATIC: LockedHeap<32> = LockedHeap::empty(); + +static mut STATIC_MEM: [u8; STATIC_MEM_SIZE] = [0; STATIC_MEM_SIZE]; + +pub fn init() { + unsafe { + STATIC + .lock() + .init(STATIC_MEM.as_ptr() as usize, STATIC_MEM_SIZE); + } +} + +/// second level: reserve memory +/// +static RES_ALLOCATOR: Once = Once::new(); + +pub fn init_res() { + // res_allocator需要在meta_allocator之后初始化 + RES_ALLOCATOR.call_once(|| { + Mutex::new(Reserve::new(1024)); + }); +} + +// mm_reserve +struct Chunk { + pub base: usize, + pub size: usize, + pub used: usize, + link: LinkedListLink, // intrusive linkedlist +} + +intrusive_adapter!(ChunkAda = Box: 
Chunk { link: LinkedListLink }); +// let linkedlist = LinkedList::new(ResChunk_Adapter::new()); + +// mm_reserve +struct Block { + size: usize, + link: LinkedListLink, // intrusive linkedlist +} +// Perhaps the link is not needed in some cases. + +intrusive_adapter!(BlockAda = Box: Block { link: LinkedListLink }); + +pub struct Reserve { + // These are block lists; each block is used to store items such as ema meta / bitmap meta / bitmap data + exact_blocks: [LinkedList; 256], + large_blocks: LinkedList, + + // The chunks structures live in the reserve memory allocated by the reserve EMAs + chunks: LinkedList, + emas: LinkedList, + + // statistics + allocated: usize, + total: usize, +} + +impl Reserve { + /// Create an empty heap + pub fn new(size: usize) -> Self { + // unsafe { + // self.add_reserve(size); + // } + let exact_blocks: [LinkedList; 256] = { + let mut exact_blocks: [MaybeUninit>; 256] = + MaybeUninit::uninit_array(); + for block in &mut exact_blocks { + block.write(LinkedList::new(BlockAda::new())); + } + unsafe { transmute(exact_blocks) } + }; + + Self { + exact_blocks, + large_blocks: LinkedList::new(BlockAda::new()), + chunks: LinkedList::new(ChunkAda::new()), + emas: LinkedList::new(ResEmaAda::new()), + allocated: 0, + total: 0, + } + } + pub fn alloc(&mut self, layout: Layout) -> Result, ()> { + // // First check whether memory is running low; if so, call add_reserve + // // Allocate a piece of memory from the free region; its head holds a block header recording the bytes in use + // // Then link this block into the corresponding list + // static threshold = 512*1024; // 0.5MB + // if self.allocated + layout.size() + threshold > self.total { + // self.add_reserve(2*threshold); + // } + + // // search available region + // if layout.size() < 256 { + // let exact_block_list = exact_blocks[layout.size()-1]; + // if !exact_block_list.is_empty() { + // let block: Box = exact_block_list.pop_front().unwrap(); + // let ptr = unsafe { + // block.as_mut_ptr() - mem::size_of::(); + // } + // let addr = std::ptr::NonNull::::new(ptr as *mut u8).unwrap(); + // return addr; + // } + // } else { + // // similar operation in large blocks + // } + + // // no available region in free
blocks + // let chunk = self.chunks.iter().find( + // |&chunk| (chunk.size - chunk.used) > layout.size() + // ); + // if let chunk = Some(chunk) { + // let ptr = chunk.base + chunk.used; + // chunk.used += layout.size(); + // let addr = std::ptr::NonNull::::new(ptr as *mut u8).unwrap(); + // return addr; + // } else { + // // self.add_reserve + // // self.alloc() + // } + todo!() + } + pub fn dealloc(&mut self, ptr: NonNull, layout: Layout) { + // // First learn this ptr's length from the block header that precedes it + // if size < 256 { + // // Push the current ptr back into its list + // } else { + // // similar operation in large blocks + // } + todo!() + } + pub unsafe fn add_reserve(&mut self, size: usize) { + // // Allocate an EMA + // let reserve_ema: EMA = EMA::new(size); + // reserve_ema.alloc(size); + // self.emas.push(reserve_ema); + // // Write mm_res into the head of the memory allocated by reserve_ema + // let chunk: ResChunk = ResChunk::new(); + // unsafe { + // let res_mem_ptr = reserve_ema.alloc().unwrap().as_mut_ptr(); + // std::ptr::write(res_mem_ptr as *mut ResChunk, chunk); + // let res_node = Box::from_raw(res_mem_ptr as *mut MM_Res ); + // // let new_mm_res = std::ptr::read(metadata_ptr as *const ResChunk); + // self.mm_reserve_list.push(res_node); + // } + todo!() + } + + // Find a free space of size at least 'size' bytes in reserve region, + // return the start address + fn find_free_region(&mut self, len: usize, align: usize) -> SgxResult { + let user_range = USER_RANGE.get().unwrap(); + let user_base = user_range.start; + let user_end = user_range.end; + + // no ema in list + if self.emas.is_empty() { + let mut addr = 0; + + if user_base >= len { + addr = trim_to!(user_base - len, align); + if is_within_enclave(addr as *const u8, len) { + return Ok(addr); + } + } else { + addr = round_to!(user_end, align); + if is_within_enclave(addr as *const u8, len) { + return Ok(addr); + } + } + return Err(SgxStatus::InvalidParameter); + } + + + let mut cursor = self.emas.cursor_mut(); + while !cursor.is_null() { + let curr_end = cursor.get() + .map(|ema|
ema.aligned_end(align)).unwrap(); + + cursor.move_next(); + if cursor.is_null() { + break; + } + + let next_start = cursor.get() + .map(|ema| ema.start()).unwrap(); + + if curr_end < next_start { + let free_size = next_start - curr_end; + // 这里或许得用is_within_rts + if free_size < len && is_within_enclave(curr_end as *const u8, len){ + return Ok(curr_end); + } + } + cursor.move_next(); + } + + + todo!() + } +} + +const reserve_init_size: usize = 65536; diff --git a/sgx_trts/src/emm/mod.rs b/sgx_trts/src/emm/mod.rs new file mode 100644 index 000000000..23aab2511 --- /dev/null +++ b/sgx_trts/src/emm/mod.rs @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +pub(crate) mod alloc; +pub(crate) mod bitmap; +pub(crate) mod ema; +pub(crate) mod flags; +#[cfg(not(any(feature = "sim", feature = "hyper")))] +pub(crate) mod interior; +pub(crate) mod user; diff --git a/sgx_trts/src/emm/user.rs b/sgx_trts/src/emm/user.rs new file mode 100644 index 000000000..b8876744f --- /dev/null +++ b/sgx_trts/src/emm/user.rs @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.. + +use super::ema::{RegEmaAda, EMA}; +use crate::emm::interior::Reserve; +use crate::enclave::MmLayout; +use alloc::boxed::Box; +use alloc::sync::Arc; +use spin::{Once, Mutex}; +use core::alloc::Layout; +use core::ffi::c_void; +use core::ptr::NonNull; +use intrusive_collections::intrusive_adapter; +use intrusive_collections::{LinkedList, LinkedListLink}; +use sgx_types::error::{SgxResult, SgxStatus}; + +#[derive(Clone, Copy)] +pub struct UserRange { + pub start: usize, + pub end: usize, +} + +pub static USER_RANGE: Once = Once::new(); + +pub fn init_range(start: usize, end: usize) { + // init + *USER_RANGE.call_once(|| { + UserRange { + start, + end, + } + }); +} + +pub fn is_within_user_range(start: usize, len: usize) -> bool { + let end = if len > 0 { + if let Some(end) = start.checked_add(len - 1) { + end + } else { + return false; + } + } else { + start + }; + let base = MmLayout::elrange_base(); + + (start <= end) && (start >= base) && (end < base + MmLayout::elrange_size()) +} + +pub struct UserMem { + emas: LinkedList, + + // statistics + allocated: usize, + total: usize, +} + +impl UserMem { + pub fn new() -> Self { + Self { + emas: LinkedList::new(RegEmaAda::new()), + allocated: 0, + total: 0, + } + } + // fn split(ema: Box) -> SgxResult<()>{ + // todo!() + // } + 
// fn merge(ema1: Box, ema2: Box) + // -> SgxResult<()> { + // todo!() + // } + pub fn alloc(&mut self, layout: Layout) -> Result, ()> { + todo!() + } + pub fn dealloc(&mut self, ptr: NonNull, layout: Layout) { + todo!() + } + pub fn commit(&mut self, layout: Layout) -> Result, ()> { + todo!() + } +} diff --git a/sgx_trts/src/lib.rs b/sgx_trts/src/lib.rs index 2a1babfc1..5c7c744bb 100644 --- a/sgx_trts/src/lib.rs +++ b/sgx_trts/src/lib.rs @@ -30,6 +30,8 @@ #![feature(nonnull_slice_from_raw_parts)] #![feature(ptr_internals)] #![feature(thread_local)] +#![feature(trait_alias)] +#![feature(new_uninit)] #![cfg_attr(feature = "sim", feature(unchecked_math))] #![allow(clippy::missing_safety_doc)] #![allow(dead_code)] @@ -61,6 +63,7 @@ mod xsave; pub mod capi; pub mod edmm; +pub mod emm; pub mod error; #[macro_use]