diff --git a/sgx_trts/src/emm/alloc.rs b/sgx_trts/src/emm/alloc.rs
index f9368f0f..d8e67938 100644
--- a/sgx_trts/src/emm/alloc.rs
+++ b/sgx_trts/src/emm/alloc.rs
@@ -22,12 +22,13 @@ use intrusive_collections::singly_linked_list::{Link, SinglyLinkedList};
 use intrusive_collections::UnsafeRef;
 use sgx_tlibc_sys::ENOMEM;
 
+use crate::sync::SpinMutex as Mutex;
 use core::alloc::{AllocError, Allocator, Layout};
+use core::any::Any;
 use core::mem::size_of;
-use core::mem::transmute;
 use core::mem::MaybeUninit;
 use core::ptr::NonNull;
-use spin::{Mutex, Once};
+use spin::Once;
 
 use super::ema::EmaOptions;
 use super::page::AllocFlags;
@@ -80,6 +81,10 @@ pub fn init_reserve_alloc() {
     RSRV_ALLOCATOR.call_once(|| Mutex::new(Reserve::new(INIT_MEM_SIZE)));
 }
 
+pub trait EmmAllocator: Allocator + Any {
+    fn as_any(&self) -> &dyn Any;
+}
+
 /// AllocType layout memory from reserve memory region
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct RsrvAlloc;
@@ -102,6 +107,12 @@ unsafe impl Allocator for RsrvAlloc {
     }
 }
 
+impl EmmAllocator for RsrvAlloc {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+}
+
 /// AllocType layout memory from static memory region
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub struct StaticAlloc;
@@ -123,23 +134,29 @@ unsafe impl Allocator for StaticAlloc {
     }
 }
 
+impl EmmAllocator for StaticAlloc {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+}
+
 // Enum for allocator types
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 #[repr(u8)]
 pub enum AllocType {
-    Static(StaticAlloc),
-    Reserve(RsrvAlloc),
+    Static,
+    Reserve,
 }
 
 impl AllocType {
-    pub fn new_static() -> Self {
-        Self::Static(StaticAlloc)
-    }
-
-    pub fn new_rsrv() -> Self {
-        Self::Reserve(RsrvAlloc)
+    pub fn alloctor(&self) -> &'static dyn EmmAllocator {
+        match self {
+            AllocType::Static => &StaticAlloc,
+            AllocType::Reserve => &RsrvAlloc,
+        }
     }
 }
+
 // Chunk manages memory range.
 // The Chunk structure is filled into the layout before the base pointer.
 #[derive(Debug)]
@@ -252,7 +269,7 @@ impl Reserve {
             for block in &mut exact_blocks {
                 block.write(SinglyLinkedList::new(BlockFreeAda::new()));
             }
-            unsafe { transmute(exact_blocks) }
+            unsafe { MaybeUninit::array_assume_init(exact_blocks) }
         };
 
         let mut reserve = Self {
@@ -478,7 +495,7 @@ impl Reserve {
                 typ: PageType::None,
                 prot: ProtFlags::NONE,
             })
-            .alloc(AllocType::new_static());
+            .alloc(AllocType::Static);
         let base = vmmgr.alloc(&options, RangeType::User)?;
 
         let mut options = EmaOptions::new(
@@ -487,7 +504,7 @@
             AllocFlags::COMMIT_ON_DEMAND | AllocFlags::FIXED,
         );
 
-        options.alloc(AllocType::new_static());
+        options.alloc(AllocType::Static);
         let base = vmmgr.alloc(&options, RangeType::User)?;
         vmmgr.commit(base, rsize)?;
diff --git a/sgx_trts/src/emm/bitmap.rs b/sgx_trts/src/emm/bitmap.rs
index 0470cc6d..ed302f5d 100644
--- a/sgx_trts/src/emm/bitmap.rs
+++ b/sgx_trts/src/emm/bitmap.rs
@@ -17,12 +17,13 @@
 
 use alloc::boxed::Box;
 use alloc::vec;
-use core::alloc::Allocator;
-use core::alloc::Layout;
-use core::ptr::NonNull;
 use sgx_tlibc_sys::EACCES;
 use sgx_types::error::OsResult;
 
+use crate::emm::alloc::EmmAllocator;
+use crate::emm::alloc::RsrvAlloc;
+use crate::emm::alloc::StaticAlloc;
+
 use super::alloc::AllocType;
 
 const BYTE_SIZE: usize = 8;
@@ -32,13 +33,11 @@ macro_rules! bytes_num {
     };
 }
 
-#[repr(C)]
 #[derive(Debug)]
 pub struct BitArray {
     bits: usize,
     bytes: usize,
-    data: *mut u8,
-    alloc: AllocType,
+    data: Box<[u8], &'static dyn EmmAllocator>,
 }
 
 impl BitArray {
@@ -47,24 +46,9 @@ impl BitArray {
         let bytes = bytes_num!(bits);
 
         // FIXME: return error if OOM
-        let data = match alloc {
-            AllocType::Reserve(allocator) => {
-                // Set bits to all zeros
-                let data = vec::from_elem_in(0_u8, bytes, allocator).into_boxed_slice();
-                Box::into_raw(data) as *mut u8
-            }
-            AllocType::Static(allocator) => {
-                let data = vec::from_elem_in(0_u8, bytes, allocator).into_boxed_slice();
-                Box::into_raw(data) as *mut u8
-            }
-        };
-
-        Ok(Self {
-            bits,
-            bytes,
-            data,
-            alloc,
-        })
+        let data = vec::from_elem_in(0_u8, bytes, alloc.alloctor()).into_boxed_slice();
+
+        Ok(Self { bits, bytes, data })
     }
 
     /// Get the value of the bit at a given index
@@ -76,8 +60,7 @@ impl BitArray {
         let byte_index = index / BYTE_SIZE;
         let bit_index = index % BYTE_SIZE;
         let bit_mask = 1 << bit_index;
-        let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) };
-        Ok((data.get(byte_index).unwrap() & bit_mask) != 0)
+        Ok((self.data.get(byte_index).unwrap() & bit_mask) != 0)
     }
 
     /// Check whether all bits are set true
@@ -99,26 +82,33 @@ impl BitArray {
         let bit_index = index % BYTE_SIZE;
         let bit_mask = 1 << bit_index;
 
-        let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) };
-
         if value {
-            data[byte_index] |= bit_mask;
+            self.data[byte_index] |= bit_mask;
         } else {
-            data[byte_index] &= !bit_mask;
+            self.data[byte_index] &= !bit_mask;
         }
         Ok(())
     }
 
     /// Set all the bits to true
     pub fn set_full(&mut self) {
-        let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) };
-        data.fill(0xFF);
+        self.data.fill(0xFF);
    }
 
     /// Clear all the bits
     pub fn clear(&mut self) {
-        let data = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) };
-        data.fill(0);
+        self.data.fill(0);
+    }
+
+    fn alloc_type(&self) -> AllocType {
+        let allocator = *Box::allocator(&self.data);
+        if allocator.as_any().downcast_ref::<RsrvAlloc>().is_some() {
+            AllocType::Reserve
+        } else if allocator.as_any().downcast_ref::<StaticAlloc>().is_some() {
+            AllocType::Static
+        } else {
+            panic!()
+        }
     }
 
     /// Split current bit array at specified position, return a new allocated bit array
@@ -136,11 +126,10 @@ impl BitArray {
         let rbits = self.bits - lbits;
         let rbytes = bytes_num!(rbits);
 
-        let rarray = Self::new(rbits, self.alloc)?;
-
-        let rdata = unsafe { core::slice::from_raw_parts_mut(rarray.data, rarray.bytes) };
-        let ldata = unsafe { core::slice::from_raw_parts_mut(self.data, self.bytes) };
+        let mut rarray = Self::new(rbits, self.alloc_type())?;
+        let rdata = &mut rarray.data;
+        let ldata = &mut self.data;
 
         for (idx, item) in rdata[..(rbytes - 1)].iter_mut().enumerate() {
             // current byte index in previous bit_array
             let curr_idx = idx + byte_index;
@@ -156,27 +145,3 @@ impl BitArray {
         Ok(rarray)
     }
 }
-
-impl Drop for BitArray {
-    fn drop(&mut self) {
-        match self.alloc {
-            AllocType::Reserve(allocator) => {
-                // Layout is redundant since interior allocator maintains the allocated size.
-                // Besides, if the bitmap is splitted, the recorded size
-                // in bitmap is not corresponding to allocated layout.
-                let fake_layout: Layout = Layout::new::<u8>();
-                unsafe {
-                    let data_ptr = NonNull::new_unchecked(self.data);
-                    allocator.deallocate(data_ptr, fake_layout);
-                }
-            }
-            AllocType::Static(allocator) => {
-                let fake_layout: Layout = Layout::new::<u8>();
-                unsafe {
-                    let data_ptr = NonNull::new_unchecked(self.data);
-                    allocator.deallocate(data_ptr, fake_layout);
-                }
-            }
-        }
-    }
-}
diff --git a/sgx_trts/src/emm/ema.rs b/sgx_trts/src/emm/ema.rs
index 1d2f9bc8..bb36be17 100644
--- a/sgx_trts/src/emm/ema.rs
+++ b/sgx_trts/src/emm/ema.rs
@@ -19,12 +19,12 @@ use crate::arch::{SE_PAGE_SHIFT, SE_PAGE_SIZE};
 use crate::emm::{PageInfo, PageRange, PageType, ProtFlags};
 use crate::enclave::is_within_enclave;
 use alloc::boxed::Box;
+use core::alloc::Allocator;
 use intrusive_collections::{intrusive_adapter, LinkedListLink, UnsafeRef};
 use sgx_tlibc_sys::{c_void, EACCES, EFAULT, EINVAL};
 use sgx_types::error::OsResult;
 
 use super::alloc::AllocType;
-use super::alloc::{RsrvAlloc, StaticAlloc};
 use super::bitmap::BitArray;
 use super::ocall;
 use super::page::AllocFlags;
@@ -34,7 +34,6 @@ use super::pfhandler::PfHandler;
 ///
 /// Question: should we replace BitArray with pointer
 /// to split struct into two pieces of 80 bytes and 32 bytes or an entity of 104 bytes?
-#[repr(C)]
 pub(crate) struct Ema {
     // page aligned start address
     start: usize,
@@ -76,29 +75,7 @@ unsafe impl Sync for Ema {}
 impl Ema {
     /// Initialize Emanode with null eaccept map,
     /// and start address must be page aligned
-    pub fn new(
-        start: usize,
-        length: usize,
-        alloc_flags: AllocFlags,
-        info: PageInfo,
-        handler: Option<PfHandler>,
-        priv_data: Option<*mut c_void>,
-        alloc: AllocType,
-    ) -> Self {
-        Self {
-            start,
-            length,
-            alloc_flags,
-            info,
-            eaccept_map: None,
-            handler,
-            priv_data,
-            link: LinkedListLink::new(),
-            alloc,
-        }
-    }
-
-    pub fn new_options(options: &EmaOptions) -> OsResult<Ema> {
+    pub fn new(options: &EmaOptions) -> OsResult<Ema> {
         ensure!(options.addr.is_some(), EINVAL);
 
         Ok(Self {
@@ -145,39 +122,15 @@
 
     /// Employ same allocator to Clone Ema without eaccept map
     pub(crate) fn clone_ema(&self) -> UnsafeRef<Ema> {
-        let ema: *mut Ema = match self.alloc {
-            AllocType::Reserve(allocator) => {
-                let ema = Box::new_in(
-                    Ema::new(
-                        self.start,
-                        self.length,
-                        self.alloc_flags,
-                        self.info,
-                        self.handler,
-                        self.priv_data,
-                        AllocType::new_rsrv(),
-                    ),
-                    allocator,
-                );
-                Box::into_raw(ema)
-            }
-            AllocType::Static(allocator) => {
-                let ema = Box::new_in(
-                    Ema::new(
-                        self.start,
-                        self.length,
-                        self.alloc_flags,
-                        self.info,
-                        self.handler,
-                        self.priv_data,
-                        AllocType::new_static(),
-                    ),
-                    allocator,
-                );
-                Box::into_raw(ema)
-            }
-        };
-        unsafe { UnsafeRef::from_raw(ema) }
+        let mut ema_options = EmaOptions::new(Some(self.start), self.length, self.alloc_flags);
+        ema_options
+            .info(self.info)
+            .handle(self.handler, self.priv_data)
+            .alloc(self.alloc);
+
+        let allocator = self.alloc.alloctor();
+        let ema = Box::new_in(Ema::new(&ema_options).unwrap(), allocator);
+        unsafe { UnsafeRef::from_raw(Box::into_raw(ema)) }
     }
 
     /// Allocate the reserve / committed / virtual memory at corresponding memory
@@ -581,16 +534,8 @@
     }
 
     fn init_eaccept_map(&mut self) -> OsResult {
-        let eaccept_map = match self.alloc {
-            AllocType::Reserve(_allocator) => {
-                let page_num = self.length >> SE_PAGE_SHIFT;
-                BitArray::new(page_num, AllocType::new_rsrv())?
-            }
-            AllocType::Static(_allocator) => {
-                let page_num = self.length >> SE_PAGE_SHIFT;
-                BitArray::new(page_num, AllocType::new_static())?
-            }
-        };
+        let page_num = self.length >> SE_PAGE_SHIFT;
+        let eaccept_map = BitArray::new(page_num, self.alloc)?;
         self.eaccept_map = Some(eaccept_map);
         Ok(())
     }
@@ -603,11 +548,15 @@
         self.info = info;
     }
 
-    /// Obtain the allocator of ema
-    pub fn allocator(&self) -> AllocType {
+    pub fn alloc_type(&self) -> AllocType {
         self.alloc
     }
 
+    /// Obtain the allocator of ema
+    pub fn allocator(&self) -> &'static dyn Allocator {
+        self.alloc.alloctor()
+    }
+
     pub fn flags(&self) -> AllocFlags {
         self.alloc_flags
     }
@@ -634,7 +583,7 @@
             },
             handler: None,
             priv_data: None,
-            alloc: AllocType::new_rsrv(),
+            alloc: AllocType::Reserve,
         }
     }
 
@@ -700,16 +649,10 @@
 impl Ema {
     pub fn allocate(options: &EmaOptions, apply_now: bool) -> OsResult<UnsafeRef<Ema>> {
         ensure!(options.addr.is_some(), EFAULT);
-        let mut new_ema = match options.alloc {
-            AllocType::Reserve(allocator) => {
-                let new_ema = Box::<Ema>::new_in(Ema::new_options(options)?, allocator);
-                unsafe { UnsafeRef::from_raw(Box::into_raw(new_ema)) }
-            }
-            AllocType::Static(allocator) => {
-                let new_ema =
-                    Box::<Ema>::new_in(Ema::new_options(options)?, allocator);
-                unsafe { UnsafeRef::from_raw(Box::into_raw(new_ema)) }
-            }
+        let mut new_ema = {
+            let allocator = options.alloc.alloctor();
+            let new_ema = Box::new_in(Ema::new(options)?, allocator);
+            unsafe { UnsafeRef::from_raw(Box::into_raw(new_ema)) }
         };
         if apply_now {
             new_ema.alloc()?;
diff --git a/sgx_trts/src/emm/init.rs b/sgx_trts/src/emm/init.rs
index 0a8618a5..04d86136 100644
--- a/sgx_trts/src/emm/init.rs
+++ b/sgx_trts/src/emm/init.rs
@@ -24,13 +24,7 @@ pub fn init_emm() {
     init_reserve_alloc();
 }
 
-cfg_if! {
-    if #[cfg(not(any(feature = "sim", feature = "hyper")))] {
-        pub use hw::*;
-    } else {
-        pub use sw::*;
-    }
-}
+pub use hw::*;
 
 #[cfg(not(any(feature = "sim", feature = "hyper")))]
 mod hw {
@@ -67,7 +61,7 @@
                         step,
                     )?;
                 }
-            } else {
+            } else if layout.entry.id != arch::LAYOUT_ID_USER_REGION {
                 build_rts_context_emas(&layout.entry, offset)?;
             }
         }
@@ -76,10 +70,6 @@ mod hw {
     }
 
     fn build_rts_context_emas(entry: &LayoutEntry, offset: usize) -> SgxResult {
-        if entry.id == arch::LAYOUT_ID_USER_REGION {
-            return Ok(());
-        }
-
         let rva = offset + (entry.rva as usize);
         assert!(is_page_aligned!(rva));
 
@@ -226,29 +216,28 @@
         let text_relo = parse::has_text_relo()?;
 
         let base = MmLayout::image_base();
-        for phdr in elf.program_iter() {
-            let typ = phdr.get_type().unwrap_or(Type::Null);
-
-            if typ == Type::Load {
-                let mut perm = ProtFlags::R;
-                let start = base + trim_to_page!(phdr.virtual_addr() as usize);
-                let end =
-                    base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize);
-
-                if phdr.flags().is_write() || text_relo {
-                    perm |= ProtFlags::W;
-                }
-                if phdr.flags().is_execute() {
-                    perm |= ProtFlags::X;
-                }
-
-                let mut options = EmaOptions::new(Some(start), end - start, AllocFlags::SYSTEM);
-                options.info(PageInfo {
-                    typ: PageType::Reg,
-                    prot: perm,
-                });
-                mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?;
+        for phdr in elf
+            .program_iter()
+            .filter(|phdr| phdr.get_type().unwrap_or(Type::Null) == Type::Load)
+        {
+            let mut perm = ProtFlags::R;
+            let start = base + trim_to_page!(phdr.virtual_addr() as usize);
+            let end =
+                base + round_to_page!(phdr.virtual_addr() as usize + phdr.mem_size() as usize);
+
+            if phdr.flags().is_write() || text_relo {
+                perm |= ProtFlags::W;
             }
+            if phdr.flags().is_execute() {
+                perm |= ProtFlags::X;
+            }
+
+            let mut options = EmaOptions::new(Some(start), end - start, AllocFlags::SYSTEM);
+            options.info(PageInfo {
+                typ: PageType::Reg,
+                prot: perm,
+            });
+            mm_init_static_region(&options).map_err(|_| SgxStatus::Unexpected)?;
         }
         Ok(())
     }
diff --git a/sgx_trts/src/emm/page.rs b/sgx_trts/src/emm/page.rs
index be5ca13b..af08e7a9 100644
--- a/sgx_trts/src/emm/page.rs
+++ b/sgx_trts/src/emm/page.rs
@@ -18,13 +18,13 @@
 use crate::arch::{SecInfo, SE_PAGE_SHIFT, SE_PAGE_SIZE};
 use crate::enclave::is_within_enclave;
 use crate::inst::EncluInst;
-use bitflags::bitflags;
 use core::num::NonZeroUsize;
 use sgx_tlibc_sys::{EFAULT, EINVAL};
 use sgx_types::error::OsResult;
 use sgx_types::marker::ContiguousMemory;
 
-bitflags! {
+impl_bitflags! {
+    #[derive(Copy, Clone)]
     pub struct AllocFlags: u32 {
         const RESERVED = 0b0001;
         const COMMIT_NOW = 0b0010;
diff --git a/sgx_trts/src/emm/tcs.rs b/sgx_trts/src/emm/tcs.rs
index 4165e16b..b1dd19fa 100644
--- a/sgx_trts/src/emm/tcs.rs
+++ b/sgx_trts/src/emm/tcs.rs
@@ -63,8 +63,10 @@ pub fn mktcs(mk_tcs: NonNull<Tcs>) -> SgxResult {
 #[cfg(not(any(feature = "sim", feature = "hyper")))]
 mod hw {
     use crate::arch::{self, Layout, Tcs};
+    use crate::emm::mm_dealloc;
     use crate::emm::page::PageType;
     use crate::emm::{mm_commit, mm_modify_type};
+    use crate::enclave::state::{self, State};
     use crate::enclave::MmLayout;
     use crate::tcs::list;
     use core::ptr;
@@ -132,6 +134,21 @@ mod hw {
         Ok(())
     }
 
+    #[inline]
+    pub fn trim_tcs(tcs: &Tcs) -> SgxResult {
+        let mut list_guard = list::TCS_LIST.lock();
+        for tcs in list_guard.iter_mut().filter(|&t| !ptr::eq(t.as_ptr(), tcs)) {
+            let result = mm_dealloc(tcs.as_ptr() as usize, arch::SE_PAGE_SIZE);
+            if result.is_err() {
+                state::set_state(State::Crashed);
+                bail!(SgxStatus::Unexpected);
+            }
+        }
+
+        list_guard.clear();
+        Ok(())
+    }
+
     fn reentrant_add_static_tcs(table: &[Layout], offset: usize) -> SgxResult {
         let base = MmLayout::image_base();
         unsafe {
diff --git a/sgx_trts/src/emm/vmmgr.rs b/sgx_trts/src/emm/vmmgr.rs
index 7722ac12..767d54b7 100644
--- a/sgx_trts/src/emm/vmmgr.rs
+++ b/sgx_trts/src/emm/vmmgr.rs
@@ -270,14 +270,9 @@ impl VmMgr {
             ema.dealloc()?;
             // Drop inner Ema inexplicitly
-            match ema.allocator() {
-                AllocType::Reserve(allocator) => {
-                    let _ema_box = unsafe { Box::from_raw_in(UnsafeRef::into_raw(ema), allocator) };
-                }
-                AllocType::Static(allocator) => {
-                    let _ema_box = unsafe { Box::from_raw_in(UnsafeRef::into_raw(ema), allocator) };
-                }
-            }
+            let allocator = ema.allocator();
+            let _ema_box = unsafe { Box::from_raw_in(UnsafeRef::into_raw(ema), allocator) };
+
             ema_num -= 1;
         }
         Ok(())
     }
@@ -314,7 +309,7 @@ impl VmMgr {
         while count != 0 {
             let ema = cursor.get().unwrap();
             // Ema must be reserved and can not manage internal memory region
-            if !ema.flags().contains(AllocFlags::RESERVED) || ema.allocator() != alloc {
+            if !ema.flags().contains(AllocFlags::RESERVED) || ema.alloc_type() != alloc {
                 return None;
             }
             cursor.move_next();
diff --git a/sgx_trts/src/enclave/mem.rs b/sgx_trts/src/enclave/mem.rs
index 8d922810..e221cafe 100644
--- a/sgx_trts/src/enclave/mem.rs
+++ b/sgx_trts/src/enclave/mem.rs
@@ -407,6 +407,9 @@ impl UserRegionMem {
 }
 
 pub fn is_within_rts_range(start: usize, len: usize) -> bool {
+    if !is_within_enclave(start as *const u8, len) {
+        return false;
+    }
     let end = if len > 0 {
         if let Some(end) = start.checked_add(len - 1) {
             end
@@ -424,6 +427,9 @@ pub fn is_within_rts_range(start: usize, len: usize) -> bool {
 }
 
 pub fn is_within_user_range(start: usize, len: usize) -> bool {
+    if !is_within_enclave(start as *const u8, len) {
+        return false;
+    }
     let end = if len > 0 {
         if let Some(end) = start.checked_add(len - 1) {
             end
diff --git a/sgx_trts/src/enclave/uninit.rs b/sgx_trts/src/enclave/uninit.rs
index 04e654f4..aaee2527 100644
--- a/sgx_trts/src/enclave/uninit.rs
+++ b/sgx_trts/src/enclave/uninit.rs
@@ -15,11 +15,10 @@
 // specific language governing permissions and limitations
 // under the License..
 
-use crate::arch;
-use crate::emm::mm_dealloc;
+use crate::emm::tcs::trim_tcs;
 use crate::enclave::state::{self, State};
 use crate::enclave::{atexit, parse};
-use crate::tcs::{list, ThreadControl};
+use crate::tcs::ThreadControl;
 use core::sync::atomic::AtomicBool;
 use sgx_types::error::SgxResult;
 
@@ -82,16 +81,7 @@
     #[cfg(not(any(feature = "sim", feature = "hyper")))]
     {
         if SysFeatures::get().is_edmm() {
-            let mut list_guard = list::TCS_LIST.lock();
-            for tcs in list_guard.iter_mut().filter(|&t| !ptr::eq(t.as_ptr(), tcs)) {
-                let result = mm_dealloc(tcs.as_ptr() as usize, arch::SE_PAGE_SIZE);
-                if result.is_err() {
-                    state::set_state(State::Crashed);
-                    bail!(SgxStatus::Unexpected);
-                }
-            }
-
-            list_guard.clear();
+            trim_tcs(tcs)?;
         }
     }
 
diff --git a/sgx_trts/src/lib.rs b/sgx_trts/src/lib.rs
index 184bf2a5..d4bc51f7 100644
--- a/sgx_trts/src/lib.rs
+++ b/sgx_trts/src/lib.rs
@@ -39,6 +39,8 @@
 #![feature(linked_list_cursors)]
 #![feature(strict_provenance)]
 #![feature(pointer_byte_offsets)]
+#![feature(maybe_uninit_array_assume_init)]
+#![feature(trait_upcasting)]
 
 #[cfg(all(feature = "sim", feature = "hyper"))]
 compile_error!("feature \"sim\" and feature \"hyper\" cannot be enabled at the same time");
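
Note (illustrative only, not part of the patch): the sketch below models the allocator pattern this change introduces. `EmmAllocator` is both `Allocator` and `Any`, so code that only holds the `&'static dyn EmmAllocator` stored inside a `Box` (as `BitArray::data` now does) can recover the concrete allocator kind by downcasting, exactly the trick `BitArray::alloc_type` uses. It is a nightly-only, self-contained example with std's `Global` standing in for the enclave's reserve/static allocators; only the names are mirrored from the diff.

// Minimal sketch of the EmmAllocator / downcast pattern, assuming a nightly
// toolchain with `allocator_api` (sgx_trts itself is nightly-only).
#![feature(allocator_api)]

use std::alloc::{AllocError, Allocator, Global, Layout};
use std::any::Any;
use std::ptr::NonNull;

/// An allocator that can also be inspected as `Any` (mirrors emm::alloc::EmmAllocator).
trait EmmAllocator: Allocator + Any {
    fn as_any(&self) -> &dyn Any;
}

/// Stand-in for RsrvAlloc: forwards every request to the global allocator.
#[derive(Clone, Copy, Debug)]
struct RsrvAlloc;

unsafe impl Allocator for RsrvAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        Global.allocate(layout)
    }
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        Global.deallocate(ptr, layout)
    }
}

impl EmmAllocator for RsrvAlloc {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

/// Stand-in for StaticAlloc, identical apart from its type identity.
#[derive(Clone, Copy, Debug)]
struct StaticAlloc;

unsafe impl Allocator for StaticAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        Global.allocate(layout)
    }
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        Global.deallocate(ptr, layout)
    }
}

impl EmmAllocator for StaticAlloc {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

/// Recover the allocator kind from the trait object stored inside the Box,
/// the same downcast used by `BitArray::alloc_type` after this change.
fn kind_of(data: &Box<[u8], &'static dyn EmmAllocator>) -> &'static str {
    let allocator = *Box::allocator(data);
    if allocator.as_any().downcast_ref::<RsrvAlloc>().is_some() {
        "Reserve"
    } else if allocator.as_any().downcast_ref::<StaticAlloc>().is_some() {
        "Static"
    } else {
        "Unknown"
    }
}

fn main() {
    // The buffer remembers its allocator as `&'static dyn EmmAllocator`,
    // like the new `data` field of `BitArray`.
    let alloc: &'static dyn EmmAllocator = &RsrvAlloc;
    let buf: Box<[u8], &'static dyn EmmAllocator> = Box::new_in([0u8; 32], alloc);
    assert_eq!(kind_of(&buf), "Reserve");
}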