diff --git a/kernel/src/cpu/percpu.rs b/kernel/src/cpu/percpu.rs
index 83aaee865..1e9d7657e 100644
--- a/kernel/src/cpu/percpu.rs
+++ b/kernel/src/cpu/percpu.rs
@@ -347,7 +347,7 @@ impl PerCpu {
     fn allocate_stack(&mut self, base: VirtAddr) -> Result<VirtAddr, SvsmError> {
         let stack = VMKernelStack::new()?;
         let top_of_stack = stack.top_of_stack(base);
-        let mapping = Arc::new(Mapping::new(stack));
+        let mapping = Arc::new(Mapping::new(stack)?);
 
         self.vm_range.insert_at(base, mapping)?;
 
@@ -406,7 +406,7 @@ impl PerCpu {
         let vaddr = VirtAddr::from(self as *const PerCpu);
         let paddr = virt_to_phys(vaddr);
 
-        let self_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        let self_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
         self.vm_range.insert_at(SVSM_PERCPU_BASE, self_mapping)?;
 
         Ok(())
@@ -414,12 +414,12 @@ impl PerCpu {
 
     fn initialize_vm_ranges(&mut self) -> Result<(), SvsmError> {
         let size_4k = SVSM_PERCPU_TEMP_END_4K - SVSM_PERCPU_TEMP_BASE_4K;
-        let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k));
+        let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k)?);
         self.vm_range
             .insert_at(SVSM_PERCPU_TEMP_BASE_4K, temp_mapping_4k)?;
 
         let size_2m = SVSM_PERCPU_TEMP_END_2M - SVSM_PERCPU_TEMP_BASE_2M;
-        let temp_mapping_2m = Arc::new(VMReserved::new_mapping(size_2m));
+        let temp_mapping_2m = Arc::new(VMReserved::new_mapping(size_2m)?);
         self.vm_range
             .insert_at(SVSM_PERCPU_TEMP_BASE_2M, temp_mapping_2m)?;
 
@@ -534,7 +534,7 @@ impl PerCpu {
     pub fn map_guest_vmsa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
         assert!(self.apic_id == this_cpu().get_apic_id());
 
-        let vmsa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        let vmsa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
         self.vm_range
             .insert_at(SVSM_PERCPU_VMSA_BASE, vmsa_mapping)?;
 
@@ -565,7 +565,7 @@ impl PerCpu {
     pub fn map_guest_caa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
         self.unmap_caa();
 
-        let caa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        let caa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
         self.vm_range.insert_at(SVSM_PERCPU_CAA_BASE, caa_mapping)?;
 
         Ok(())
diff --git a/kernel/src/mm/vm/mapping/api.rs b/kernel/src/mm/vm/mapping/api.rs
index f143daebb..98fbcb5eb 100644
--- a/kernel/src/mm/vm/mapping/api.rs
+++ b/kernel/src/mm/vm/mapping/api.rs
@@ -6,6 +6,7 @@
 
 use crate::address::{PhysAddr, VirtAddr};
 use crate::error::SvsmError;
+use crate::globalbox_upcast;
 use crate::locking::{RWLock, ReadLockGuard, WriteLockGuard};
 use crate::mm::pagetable::PTEntryFlags;
 use crate::mm::vm::VMR;
@@ -22,7 +23,6 @@ use intrusive_collections::{
 use core::ops::Range;
 
 extern crate alloc;
-use alloc::boxed::Box;
 use alloc::sync::Arc;
 
 /// Information required to resolve a page fault within a virtual mapping
@@ -144,27 +144,28 @@ pub trait VirtualMapping: fmt::Debug {
 
 #[derive(Debug)]
 pub struct Mapping {
-    mapping: RWLock<Box<dyn VirtualMapping>>,
+    mapping: RWLock<GlobalBox<dyn VirtualMapping>>,
 }
 
 unsafe impl Send for Mapping {}
 unsafe impl Sync for Mapping {}
 
 impl Mapping {
-    pub fn new<T>(mapping: T) -> Self
+    pub fn new<T>(mapping: T) -> Result<Self, SvsmError>
     where
         T: VirtualMapping + 'static,
     {
-        Mapping {
-            mapping: RWLock::new(Box::new(mapping)),
-        }
+        let boxed = globalbox_upcast!(GlobalBox::try_new(mapping)?, VirtualMapping);
+        Ok(Self {
+            mapping: RWLock::new(boxed),
+        })
     }
 
-    pub fn get(&self) -> ReadLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get(&self) -> ReadLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.lock_read()
     }
 
-    pub fn get_mut(&self) -> WriteLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get_mut(&self) -> WriteLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.lock_write()
     }
 }
@@ -232,11 +233,11 @@ impl VMM {
         )
     }
 
-    pub fn get_mapping(&self) -> ReadLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get_mapping(&self) -> ReadLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.get()
     }
 
-    pub fn get_mapping_mut(&self) -> WriteLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get_mapping_mut(&self) -> WriteLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.get_mut()
     }
 
diff --git a/kernel/src/mm/vm/mapping/file_mapping.rs b/kernel/src/mm/vm/mapping/file_mapping.rs
index 9b3e33850..c32bde7b7 100644
--- a/kernel/src/mm/vm/mapping/file_mapping.rs
+++ b/kernel/src/mm/vm/mapping/file_mapping.rs
@@ -153,7 +153,7 @@ fn copy_page(
 ) -> Result<(), SvsmError> {
     let page_size = usize::from(page_size);
     let temp_map = VMPhysMem::new(paddr_dst, page_size, true);
-    let vaddr_new_page = vmr.insert(Arc::new(Mapping::new(temp_map)))?;
+    let vaddr_new_page = vmr.insert(Arc::new(Mapping::new(temp_map)?))?;
     let slice = unsafe { from_raw_parts_mut(vaddr_new_page.as_mut_ptr::<u8>(), page_size) };
     file.seek(offset);
     file.read(slice)?;
diff --git a/kernel/src/mm/vm/mapping/kernel_stack.rs b/kernel/src/mm/vm/mapping/kernel_stack.rs
index bf7da012c..dfcad17a4 100644
--- a/kernel/src/mm/vm/mapping/kernel_stack.rs
+++ b/kernel/src/mm/vm/mapping/kernel_stack.rs
@@ -101,7 +101,7 @@ impl VMKernelStack {
     ///
     /// Initialized Mapping to stack on success, Err(SvsmError::Mem) on error
     pub fn new_mapping() -> Result<Mapping, SvsmError> {
-        Ok(Mapping::new(Self::new()?))
+        Mapping::new(Self::new()?)
     }
 
     fn alloc_pages(&mut self) -> Result<(), SvsmError> {
diff --git a/kernel/src/mm/vm/mapping/phys_mem.rs b/kernel/src/mm/vm/mapping/phys_mem.rs
index a86017413..6680bd3e1 100644
--- a/kernel/src/mm/vm/mapping/phys_mem.rs
+++ b/kernel/src/mm/vm/mapping/phys_mem.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel <jroedel@suse.de>
 
 use crate::address::{Address, PhysAddr};
+use crate::error::SvsmError;
 use crate::mm::pagetable::PTEntryFlags;
 
 use super::{Mapping, VirtualMapping};
@@ -51,7 +52,7 @@ impl VMPhysMem {
     /// # Returns
     ///
     /// New [`Mapping`] containing [`VMPhysMem`]
-    pub fn new_mapping(base: PhysAddr, size: usize, writable: bool) -> Mapping {
+    pub fn new_mapping(base: PhysAddr, size: usize, writable: bool) -> Result<Mapping, SvsmError> {
         Mapping::new(Self::new(base, size, writable))
     }
 }
diff --git a/kernel/src/mm/vm/mapping/reserved.rs b/kernel/src/mm/vm/mapping/reserved.rs
index 5bd9b298b..62441f349 100644
--- a/kernel/src/mm/vm/mapping/reserved.rs
+++ b/kernel/src/mm/vm/mapping/reserved.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel <jroedel@suse.de>
 
 use crate::address::PhysAddr;
+use crate::error::SvsmError;
 use crate::mm::pagetable::PTEntryFlags;
 
 use super::{Mapping, VirtualMapping};
@@ -41,7 +42,7 @@ impl VMReserved {
     /// # Returns
     ///
     /// New Mapping of VMReserved
-    pub fn new_mapping(size: usize) -> Mapping {
+    pub fn new_mapping(size: usize) -> Result<Mapping, SvsmError> {
         Mapping::new(Self::new(size))
     }
 }
diff --git a/kernel/src/mm/vm/mapping/vmalloc.rs b/kernel/src/mm/vm/mapping/vmalloc.rs
index 41a3090fb..a5ea5b8c8 100644
--- a/kernel/src/mm/vm/mapping/vmalloc.rs
+++ b/kernel/src/mm/vm/mapping/vmalloc.rs
@@ -49,7 +49,7 @@ impl VMalloc {
     ///
     /// New [`Mapping`] on success, Err(SvsmError::Mem) on error
     pub fn new_mapping(size: usize) -> Result<Mapping, SvsmError> {
-        Ok(Mapping::new(Self::new(size)?))
+        Mapping::new(Self::new(size)?)
     }
 
     fn alloc_pages(&mut self) -> Result<(), SvsmError> {
diff --git a/kernel/src/task/tasks.rs b/kernel/src/task/tasks.rs
index adbd51c97..3e78917e2 100644
--- a/kernel/src/task/tasks.rs
+++ b/kernel/src/task/tasks.rs
@@ -266,7 +266,7 @@ impl Task {
         let stack = VMKernelStack::new()?;
         let bounds = stack.bounds(VirtAddr::from(0u64));
 
-        let mapping = Arc::new(Mapping::new(stack));
+        let mapping = Arc::new(Mapping::new(stack)?);
         let percpu_mapping = cpu.new_mapping(mapping.clone())?;
 
         // We need to setup a context on the stack that matches the stack layout
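Note on the calling convention this patch introduces: Mapping::new() now allocates its trait object through GlobalBox::try_new() instead of Box::new(), so the constructors built on it (VMPhysMem::new_mapping(), VMReserved::new_mapping(), VMKernelStack::new_mapping(), VMalloc::new_mapping()) return Result and an allocation failure surfaces as SvsmError rather than aborting inside the allocator. The sketch below is not part of the patch; it only illustrates the caller-side pattern. The function name map_one_page is hypothetical, and it assumes the imports already in scope in kernel/src/cpu/percpu.rs (Arc, PAGE_SIZE, PhysAddr, VirtAddr, SvsmError, VMPhysMem, VMR).

    // Hypothetical helper, not part of this patch: map one writable page of
    // physical memory into a VMR and return the virtual address it was placed at.
    fn map_one_page(vmr: &VMR, paddr: PhysAddr) -> Result<VirtAddr, SvsmError> {
        // VMPhysMem::new_mapping() is now fallible because Mapping::new() goes
        // through GlobalBox::try_new(); the error is simply propagated with `?`.
        let mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
        // VMR::insert() already returned Result, so both failure paths compose.
        vmr.insert(mapping)
    }

Callers that previously wrapped the constructors directly in Arc::new(...) (percpu.rs, tasks.rs, file_mapping.rs above) follow the same pattern: they add a single `?` and their existing Result return types absorb the new error path.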