From 7e2d047f9fa7e09e14e89a509ce30a305acbe474 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Wed, 27 Dec 2023 12:22:14 +0100
Subject: [PATCH 1/9] alloc: initial TryBox implementation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Introduce the alloc submodule and the TryBox type, forks of the
upstream alloc crate and Box type, respectively. The new type behaves
exactly like Box, except that it only supports fallible allocations,
meaning that users must always handle potential allocation errors.
Users must also explicitly set the allocator to use.

Using a specific allocator by default, like the standard library does,
is complicated. The standard library uses whichever allocator is set as
global (via the #[global_allocator] macro), which can be accessed
through a global instance (alloc::alloc::Global). However, the global
instance is gated behind an unstable feature, and the macro cannot be
reimplemented because it is a compiler intrinsic. As a workaround,
higher-level types that use a specific allocator by default may be
added in the future.

The new type also has a few extra convenience methods, such as
try_default_in(), try_clone() and try_clone_in().

Signed-off-by: Carlos López
---
 kernel/src/alloc/boxed.rs  | 918 +++++++++++++++++++++++++++++++++++++
 kernel/src/alloc/mod.rs    | 409 +++++++++++++++++
 kernel/src/alloc/unique.rs | 195 ++++++++
 kernel/src/lib.rs          |   1 +
 4 files changed, 1523 insertions(+)
 create mode 100644 kernel/src/alloc/boxed.rs
 create mode 100644 kernel/src/alloc/mod.rs
 create mode 100644 kernel/src/alloc/unique.rs

diff --git a/kernel/src/alloc/boxed.rs b/kernel/src/alloc/boxed.rs
new file mode 100644
index 000000000..fbbc543fc
--- /dev/null
+++ b/kernel/src/alloc/boxed.rs
@@ -0,0 +1,918 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (C) 2023 SUSE
+//
+// Author: Carlos López
+
+//! The `TryBox` type for heap allocation.
+//!
+//! [`TryBox`], casually referred to as a 'box', provides the simplest form of
+//! heap allocation in Rust. Boxes provide ownership for this allocation, and
+//! drop their contents when they go out of scope. Boxes also ensure that they
+//! never allocate more than `isize::MAX` bytes.
+//!
+//! This is a downstream version of `Box` with a stabilized allocator API,
+//! supporting fallible allocations exclusively.
+
+use core::alloc::Layout;
+use core::any::Any;
+use core::borrow;
+use core::cmp::Ordering;
+use core::fmt;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
+
+use super::unique::Unique;
+use super::{Allocator, TryAllocError};
+
+/// A pointer type that uniquely owns a heap allocation of type `T`, generic
+/// over any given allocator, and supporting only fallible allocations.
+///
+/// This is a downstream version of `Box` with a stabilized allocator API,
+/// supporting fallible allocations exclusively.
+pub struct TryBox<T: ?Sized, A: Allocator>(Unique<T>, A);
+
+impl<T, A: Allocator> TryBox<T, A> {
+    /// Allocates memory in the given allocator, then places `x` into it,
+    /// returning an error if the allocation fails.
+    ///
+    /// This doesn't actually allocate if `T` is zero-sized.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use svsm::alloc::boxed::TryBox;
+    /// use std::alloc::System;
+    ///
+    /// let five = TryBox::try_new_in(5, System)?;
+    /// # Ok::<(), svsm::alloc::TryAllocError>(())
+    /// ```
+    #[inline]
+    pub fn try_new_in(x: T, alloc: A) -> Result<Self, TryAllocError> {
+        let mut boxed = Self::try_new_uninit_in(alloc)?;
+        unsafe {
+            boxed.as_mut_ptr().write(x);
+            Ok(boxed.assume_init())
+        }
+    }
+
+    /// Constructs a new `TryBox` with uninitialized contents in the given
+    /// allocator, returning an error if the allocation fails.
+    ///
+    /// This doesn't actually allocate if `T` is zero-sized.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use svsm::alloc::boxed::TryBox;
+    /// use std::alloc::System;
+    ///
+    /// let mut five = TryBox::<u32, _>::try_new_uninit_in(System)?;
+    /// let five = unsafe {
+    ///     // Deferred initialization:
+    ///     five.as_mut_ptr().write(5);
+    ///     five.assume_init()
+    /// };
+    ///
+    /// assert_eq!(*five, 5);
+    /// # Ok::<(), svsm::alloc::TryAllocError>(())
+    /// ```
+    pub fn try_new_uninit_in(alloc: A) -> Result<TryBox<mem::MaybeUninit<T>, A>, TryAllocError> {
+        let ptr = if mem::size_of::<T>() == 0 {
+            NonNull::dangling()
+        } else {
+            let layout = Layout::new::<mem::MaybeUninit<T>>();
+            alloc.allocate(layout)?.cast()
+        };
+        unsafe { Ok(TryBox::from_raw_in(ptr.as_ptr(), alloc)) }
+    }
+
+    /// Constructs a new `TryBox` with uninitialized contents, with the memory
+    /// being filled with `0` bytes in the provided allocator.
+    ///
+    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+    /// of this method.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use svsm::alloc::boxed::TryBox;
+    /// use std::alloc::System;
+    ///
+    /// let zero = TryBox::<u32, _>::try_new_zeroed_in(System)?;
+    /// let zero = unsafe { zero.assume_init() };
+    ///
+    /// assert_eq!(*zero, 0);
+    /// # Ok::<(), svsm::alloc::TryAllocError>(())
+    /// ```
+    ///
+    /// [zeroed]: mem::MaybeUninit::zeroed
+    pub fn try_new_zeroed_in(alloc: A) -> Result<TryBox<mem::MaybeUninit<T>, A>, TryAllocError> {
+        let ptr = if mem::size_of::<T>() == 0 {
+            NonNull::dangling()
+        } else {
+            let layout = Layout::new::<mem::MaybeUninit<T>>();
+            alloc.allocate_zeroed(layout)?.cast()
+        };
+        unsafe { Ok(TryBox::from_raw_in(ptr.as_ptr(), alloc)) }
+    }
+
+    /// Constructs a new `Pin<TryBox<T, A>>`. If `T` does not implement
+    /// [`Unpin`], then `x` will be pinned in memory and unable to be
+    /// moved.
+    ///
+    /// Constructing and pinning of the `TryBox` can also be done in two
+    /// steps: `TryBox::try_pin_in(x, alloc)` does the same as
+    /// [TryBox::into_pin]\([TryBox::try_new_in]\(x, alloc)?).
+    /// Consider using [`into_pin`](TryBox::into_pin) if you already have a
+    /// `TryBox<T, A>`, or if you want to construct a (pinned) `TryBox` in
+    /// a different way than with [`TryBox::try_new_in`].
+    pub fn try_pin_in(x: T, alloc: A) -> Result<Pin<Self>, TryAllocError>
+    where
+        A: 'static + Allocator,
+    {
+        let boxed = Self::try_new_in(x, alloc)?;
+        Ok(Self::into_pin(boxed))
+    }
+
+    /// Converts a `TryBox<T, A>` into a `TryBox<[T], A>` containing a single
+    /// element.
+    pub fn into_boxed_slice(boxed: Self) -> TryBox<[T], A> {
+        let (raw, alloc) = TryBox::into_raw_with_allocator(boxed);
+        unsafe { TryBox::from_raw_in(raw as *mut [T; 1], alloc) }
+    }
+
+    /// Consumes the `TryBox`, returning the wrapped value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use svsm::alloc::boxed::TryBox;
+    /// use std::alloc::System;
+    ///
+    /// let c = TryBox::try_new_in(5, System)?;
+    ///
+    /// assert_eq!(TryBox::into_inner(c), 5);
+    /// # Ok::<(), svsm::alloc::TryAllocError>(())
+    /// ```
+    #[inline]
+    pub fn into_inner(self) -> T {
+        let (raw, alloc) = TryBox::into_raw_with_allocator(self);
+        // SAFETY: `raw` comes from a valid, initialized box that has been
+        // consumed and will not be dropped, so the value can be moved out
+        // and the memory released without a double drop.
+        unsafe {
+            let val = raw.read();
+            let layout = Layout::new::<T>();
+            if layout.size() != 0 {
+                alloc.deallocate(NonNull::new_unchecked(raw).cast(), layout);
+            }
+            val
+        }
+    }
+}
+
+impl<T, A: Allocator> TryBox<mem::MaybeUninit<T>, A> {
+    /// Converts to `TryBox<T, A>`.
+    ///
+    /// # Safety
+    ///
+    /// As with [`MaybeUninit::assume_init`],
+    /// it is up to the caller to guarantee that the value
+    /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized + /// causes immediate undefined behavior. + /// + /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init + /// + /// # Examples + /// + /// ``` + /// # use svsm::alloc::boxed::TryBox; + /// use std::alloc::System; + /// + /// let mut five = TryBox::::try_new_uninit_in(System)?; + /// + /// let five = unsafe { + /// // Deferred initialization: + /// five.as_mut_ptr().write(5); + /// + /// five.assume_init() + /// }; + /// + /// assert_eq!(*five, 5); + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub unsafe fn assume_init(self) -> TryBox { + let (raw, alloc) = TryBox::into_raw_with_allocator(self); + unsafe { TryBox::from_raw_in(raw as *mut T, alloc) } + } + + /// Writes the value and converts to `TryBox`. + /// + /// This method converts the box similarly to [`TryBox::assume_init`] but + /// writes `value` into it before conversion thus guaranteeing safety. + /// In some scenarios use of this method may improve performance because + /// the compiler may be able to optimize copying from stack. + /// + /// # Examples + /// + /// ``` + /// # use svsm::alloc::boxed::TryBox; + /// use std::alloc::System; + /// + /// let big_box = TryBox::<[usize; 1024], _>::try_new_uninit_in(System)?; + /// + /// let mut array = [0; 1024]; + /// for (i, place) in array.iter_mut().enumerate() { + /// *place = i; + /// } + /// + /// // The optimizer may be able to elide this copy, so previous code writes + /// // to heap directly. + /// let big_box = TryBox::write(big_box, array); + /// + /// for (i, x) in big_box.iter().enumerate() { + /// assert_eq!(*x, i); + /// } + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub fn write(mut boxed: Self, value: T) -> TryBox { + unsafe { + (*boxed).write(value); + boxed.assume_init() + } + } +} + +impl TryBox { + /// Constructs a box from a raw pointer in the given allocator. + /// + /// After calling this function, the raw pointer is owned by the + /// resulting `TryBox`. Specifically, the `TryBox` destructor will call + /// the destructor of `T` and free the allocated memory. For this + /// to be safe, the memory must have been allocated in accordance + /// with the memory layout used by `TryBox` . + /// + /// # Safety + /// + /// This function is unsafe because improper use may lead to + /// memory problems. For example, a double-free may occur if the + /// function is called twice on the same raw pointer. + /// + /// + /// # Examples + /// + /// Recreate a `TryBox` which was previously converted to a raw pointer + /// using [`TryBox::into_raw_with_allocator`]: + /// ``` + /// # use svsm::alloc::boxed::TryBox; + /// use std::alloc::System; + /// + /// let x = TryBox::try_new_in(5, System)?; + /// let (ptr, alloc) = TryBox::into_raw_with_allocator(x); + /// let x = unsafe { TryBox::from_raw_in(ptr, alloc) }; + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + /// Manually create a `TryBox` from scratch by using the system allocator: + /// ``` + /// # use svsm::alloc::{boxed::TryBox, Allocator}; + /// use std::alloc::{Layout, System}; + /// + /// unsafe { + /// let ptr = System.allocate(Layout::new::())?.as_ptr() as *mut i32; + /// // In general .write is required to avoid attempting to destruct + /// // the (uninitialized) previous contents of `ptr`, though for this + /// // simple example `*ptr = 5` would have worked as well. 
+ /// ptr.write(5); + /// let x = TryBox::from_raw_in(ptr, System); + /// } + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub unsafe fn from_raw_in(raw: *mut T, alloc: A) -> Self { + Self(unsafe { Unique::new_unchecked(raw) }, alloc) + } + + /// Consumes the `TryBox`, returning a wrapped raw pointer. + /// + /// The pointer will be properly aligned and non-null. + /// + /// After calling this function, the caller is responsible for the + /// memory previously managed by the `TryBox`. In particular, the + /// caller should properly destroy `T` and release the memory, taking + /// into account the memory layout used by `TryBox`. The easiest way to + /// do this is to convert the raw pointer back into a `TryBox` with the + /// [`TryBox::from_raw_in`] function, allowing the `TryBox` destructor to perform + /// the cleanup. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `TryBox::into_raw(b)` instead of `b.into_raw()`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// # Examples + /// Converting the raw pointer back into a `TryBox` with [`TryBox::from_raw_in`] + /// for automatic cleanup: + /// ``` + /// # use svsm::alloc::boxed::TryBox; + /// use std::alloc::System; + /// + /// let x = TryBox::try_new_in(String::from("Hello"), System)?; + /// let ptr = TryBox::into_raw(x); + /// let x = unsafe { TryBox::from_raw_in(ptr, System) }; + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + /// Manual cleanup by explicitly running the destructor and deallocating + /// the memory: + /// ``` + /// # use svsm::alloc::{boxed::TryBox, Allocator}; + /// use std::alloc::{Layout, System}; + /// use std::ptr::{self, NonNull}; + /// + /// let x = TryBox::try_new_in(String::from("Hello"), System)?; + /// let p = TryBox::into_raw(x); + /// unsafe { + /// ptr::drop_in_place(p); + /// let non_null = NonNull::new_unchecked(p); + /// System.deallocate(non_null.cast(), Layout::new::()); + /// } + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub fn into_raw(b: Self) -> *mut T { + Self::into_raw_with_allocator(b).0 + } + + /// Consumes the `TryBox`, returning a wrapped raw pointer and the allocator. + /// + /// The pointer will be properly aligned and non-null. + /// + /// After calling this function, the caller is responsible for the + /// memory previously managed by the `TryBox`. In particular, the + /// caller should properly destroy `T` and release the memory, taking + /// into account the memory layout used by `TryBox`. The easiest way to + /// do this is to convert the raw pointer back into a `TryBox` with the + /// [`TryBox::from_raw_in`] function, allowing the `TryBox` destructor to perform + /// the cleanup. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `TryBox::into_raw_with_allocator(b)` instead of `b.into_raw_with_allocator()`. This + /// is so that there is no conflict with a method on the inner type. 
+ /// + /// # Examples + /// Converting the raw pointer back into a `TryBox` with [`TryBox::from_raw_in`] + /// for automatic cleanup: + /// ``` + /// # use svsm::alloc::boxed::TryBox; + /// use std::alloc::System; + /// + /// let x = TryBox::try_new_in(String::from("Hello"), System)?; + /// let (ptr, alloc) = TryBox::into_raw_with_allocator(x); + /// let x = unsafe { TryBox::from_raw_in(ptr, alloc) }; + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + /// Manual cleanup by explicitly running the destructor and deallocating + /// the memory: + /// ``` + /// # use svsm::alloc::{boxed::TryBox, Allocator}; + /// + /// use std::alloc::{Layout, System}; + /// use std::ptr::{self, NonNull}; + /// + /// let x = TryBox::try_new_in(String::from("Hello"), System)?; + /// let (ptr, alloc) = TryBox::into_raw_with_allocator(x); + /// unsafe { + /// ptr::drop_in_place(ptr); + /// let non_null = NonNull::new_unchecked(ptr); + /// alloc.deallocate(non_null.cast(), Layout::new::()); + /// } + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) { + let (leaked, alloc) = TryBox::into_unique(b); + (leaked.as_ptr(), alloc) + } + + #[inline] + pub(super) fn into_unique(b: Self) -> (Unique, A) { + // TryBox is recognized as a "unique pointer" by Stacked Borrows, but internally it is a + // raw pointer for the type system. Turning it directly into a raw pointer would not be + // recognized as "releasing" the unique pointer to permit aliased raw accesses, + // so all raw pointer methods have to go through `TryBox::leak`. Turning *that* to a raw pointer + // behaves correctly. + let alloc = unsafe { ptr::read(&b.1) }; + (Unique::from(Self::leak(b)), alloc) + } + + /// Returns a reference to the underlying allocator. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `TryBox::allocator(&b)` instead of `b.allocator()`. This + /// is so that there is no conflict with a method on the inner type. + #[inline] + pub const fn allocator(b: &Self) -> &A { + &b.1 + } + + /// Consumes and leaks the `TryBox`, returning a mutable reference, + /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime + /// `'a`. If the type has only static references, or none at all, then this + /// may be chosen to be `'static`. + /// + /// This function is mainly useful for data that lives for the remainder of + /// the program's life. Dropping the returned reference will cause a memory + /// leak. If this is not acceptable, the reference should first be wrapped + /// with the [`TryBox::from_raw_in`] function producing a `TryBox`. This `TryBox` can + /// then be dropped which will properly destroy `T` and release the + /// allocated memory. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `TryBox::leak(b)` instead of `b.leak()`. This + /// is so that there is no conflict with a method on the inner type. 
+ /// + /// # Examples + /// + /// Simple usage: + /// + /// ``` + /// # use svsm::alloc::boxed::TryBox; + /// use std::alloc::System; + /// + /// let x = TryBox::try_new_in(41, System)?; + /// let static_ref: &'static mut usize = TryBox::leak(x); + /// *static_ref += 1; + /// assert_eq!(*static_ref, 42); + /// + /// // Deallocate + /// let x = unsafe { TryBox::from_raw_in(static_ref, System) }; + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + /// + /// Unsized data: + /// + /// ``` + /// # use svsm::alloc::boxed::TryBox; + /// use std::alloc::System; + /// + /// let x = TryBox::into_boxed_slice(TryBox::try_new_in(41, System)?); + /// let static_ref = TryBox::leak(x); + /// static_ref[0] = 4; + /// assert_eq!(static_ref[0], 4); + /// + /// // Deallocate + /// let x = unsafe { TryBox::from_raw_in(static_ref, System) }; + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub fn leak<'a>(b: Self) -> &'a mut T + where + A: 'a, + { + unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() } + } + + /// Converts a `TryBox` into a `Pin>`. If `T` does not implement [`Unpin`], then + /// `*boxed` will be pinned in memory and unable to be moved. + /// + /// This conversion does not allocate on the heap and happens in place. + /// + /// This is also available via [`From`]. + /// + /// Constructing and pinning a `TryBox` with TryBox::into_pin([TryBox::try_new_in]\(x, alloc)) + /// can also be written more concisely using [TryBox::try_pin_in]\(x, alloc). + /// This `into_pin` method is useful if you already have a `TryBox`, or you are + /// constructing a (pinned) `TryBox` in a different way than with [`TryBox::try_new_in`]. + /// + /// # Notes + /// + /// It's not recommended that crates add an impl like `From> for Pin`, + /// as it'll introduce an ambiguity when calling `Pin::from`. + pub fn into_pin(boxed: Self) -> Pin + where + A: 'static, + { + // It's not possible to move or replace the insides of a `Pin>` + // when `T: !Unpin`, so it's safe to pin it directly without any + // additional requirements. + unsafe { Pin::new_unchecked(boxed) } + } +} + +impl Drop for TryBox { + #[inline] + fn drop(&mut self) { + let ptr = self.0; + unsafe { + let layout = Layout::for_value(ptr.as_ref()); + ptr.as_ptr().drop_in_place(); + if layout.size() != 0 { + self.1.deallocate(From::from(ptr.cast()), layout); + } + } + } +} + +impl TryBox { + /// Allocates memory in the given allocator and places the default value + /// for `T` into it. + #[inline] + pub fn try_default_in(alloc: A) -> Result { + Self::try_new_in(T::default(), alloc) + } +} + +impl TryBox { + /// Returns a new `TryBox` with this box's contents. The new box is + /// allocated with this box's allocator. + pub fn try_clone(&self) -> Result { + let boxed = Self::try_new_uninit_in(self.1.clone())?; + Ok(TryBox::write(boxed, unsafe { self.0.as_ref().clone() })) + } +} + +impl TryBox { + /// Returns a new `TryBox` with this box's contents. The new box is + /// allocated with the given allocator. 
+ pub fn try_clone_in(&self, alloc: A) -> Result { + let boxed = Self::try_new_uninit_in(alloc)?; + Ok(TryBox::write(boxed, unsafe { self.0.as_ref().clone() })) + } +} + +impl PartialEq for TryBox { + #[inline] + fn eq(&self, other: &Self) -> bool { + PartialEq::eq(&**self, &**other) + } +} + +impl PartialOrd for TryBox { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + PartialOrd::partial_cmp(&**self, &**other) + } + #[inline] + fn lt(&self, other: &Self) -> bool { + PartialOrd::lt(&**self, &**other) + } + #[inline] + fn le(&self, other: &Self) -> bool { + PartialOrd::le(&**self, &**other) + } + #[inline] + fn ge(&self, other: &Self) -> bool { + PartialOrd::ge(&**self, &**other) + } + #[inline] + fn gt(&self, other: &Self) -> bool { + PartialOrd::gt(&**self, &**other) + } +} + +impl Ord for TryBox { + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + Ord::cmp(&**self, &**other) + } +} + +impl Eq for TryBox {} + +impl From> for Pin> +where + A: 'static, +{ + /// Converts a `TryBox` into a `Pin>`. If `T` does not implement [`Unpin`], then + /// `*boxed` will be pinned in memory and unable to be moved. + /// + /// This conversion does not allocate on the heap and happens in place. + /// + /// This is also available via [`TryBox::into_pin`]. + /// + /// Constructing and pinning a `TryBox` with >>::from([TryBox::try_new_in]\(x, alloc)?) + /// can also be written more concisely using [TryBox::try_pin_in]\(x, alloc)?. + /// This `From` implementation is useful if you already have a `TryBox`, or you are + /// constructing a (pinned) `TryBox` in a different way than with [`TryBox::try_new_in`]. + fn from(boxed: TryBox) -> Self { + TryBox::into_pin(boxed) + } +} + +/// Upcast a [`TryBox`] to a `dyn trait` object. Normally this macro would not +/// be necessary, as trait coercion via [`CoerceUnsized`](core::ops::CoerceUnsized) +/// would transparently convert any `TryBox` to `TryBox`, +/// but since `CoerceUnsized` is not stable, we need an explicit macro. +/// +/// ``` +/// use std::alloc::System; +/// use svsm::alloc::boxed::TryBox; +/// use svsm::trybox_upcast; +/// +/// trait MyTrait {} +/// impl MyTrait for usize {} +/// +/// let boxed = TryBox::try_new_in(5usize, System)?; +/// let v: TryBox = trybox_upcast!(boxed, MyTrait); +/// # Ok::<(), svsm::alloc::TryAllocError>(()) +/// ``` +/// +/// Upcasting to a trait that `T` does not implement does not work: +/// +/// ```compile_fail +/// use std::alloc::System; +/// use svsm::trybox_upcast; +/// use svsm::alloc::boxed::TryBox; +/// +/// trait MyTrait {} +/// +/// let boxed = TryBox::try_new_in(5usize, System)?; +/// let v: TryBox = trybox_upcast!(boxed, MyTrait); +/// # Ok::<(), svsm::alloc::TryAllocError>(()) +/// ``` +#[macro_export] +macro_rules! trybox_upcast { + ($boxed:expr, $bound:tt $(+ $others:tt)*) => {{ + let (ptr, alloc) = TryBox::into_raw_with_allocator($boxed); + unsafe { TryBox::from_raw_in(ptr as *mut (dyn $bound $(+ $others)*), alloc) } + }} +} + +impl TryBox { + /// Attempt to downcast the box to a concrete type. 
+ /// + /// # Examples + /// + /// ``` + /// use std::alloc::System; + /// use std::any::Any; + /// use svsm::alloc::{boxed::TryBox, Allocator}; + /// use svsm::trybox_upcast; + /// + /// fn print_if_string(value: TryBox) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// let my_string = "Hello World".to_string(); + /// print_if_string(trybox_upcast!(TryBox::try_new_in(my_string, System)?, Any)); + /// print_if_string(trybox_upcast!(TryBox::try_new_in(0i8, System)?, Any)); + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub fn downcast(self) -> Result, Self> { + if self.is::() { + unsafe { Ok(self.downcast_unchecked::()) } + } else { + Err(self) + } + } + + /// Downcasts the box to a concrete type. + /// + /// For a safe alternative see [`downcast`]. + /// + /// # Examples + /// + /// ``` + /// use std::alloc::System; + /// use std::any::Any; + /// use svsm::alloc::boxed::TryBox; + /// use svsm::trybox_upcast; + /// + /// let x = trybox_upcast!(TryBox::try_new_in(1_usize, System)?, Any); + /// + /// unsafe { + /// assert_eq!(*x.downcast_unchecked::(), 1); + /// } + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + /// + /// # Safety + /// + /// The contained value must be of type `T`. Calling this method + /// with the incorrect type is *undefined behavior*. + /// + /// [`downcast`]: Self::downcast + #[inline] + pub unsafe fn downcast_unchecked(self) -> TryBox { + debug_assert!(self.is::()); + unsafe { + let (raw, alloc): (*mut dyn Any, _) = TryBox::into_raw_with_allocator(self); + TryBox::from_raw_in(raw as *mut T, alloc) + } + } +} + +impl TryBox { + /// Attempt to downcast the box to a concrete type. + /// + /// # Examples + /// + /// ``` + /// use std::alloc::System; + /// use std::any::Any; + /// use svsm::alloc::boxed::TryBox; + /// use svsm::trybox_upcast; + /// + /// fn print_if_string(value: TryBox) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// let my_string = "Hello World".to_string(); + /// print_if_string(trybox_upcast!( + /// TryBox::try_new_in(my_string, System)?, + /// Any + Send + /// )); + /// print_if_string(trybox_upcast!(TryBox::try_new_in(0i8, System)?, Any + Send)); + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub fn downcast(self) -> Result, Self> { + if self.is::() { + unsafe { Ok(self.downcast_unchecked::()) } + } else { + Err(self) + } + } + + /// Downcasts the box to a concrete type. + /// + /// For a safe alternative see [`downcast`]. + /// + /// # Examples + /// + /// ``` + /// use std::alloc::System; + /// use std::any::Any; + /// use svsm::alloc::boxed::TryBox; + /// use svsm::trybox_upcast; + /// + /// let x = trybox_upcast!(TryBox::try_new_in(1_usize, System)?, Any + Send); + /// + /// unsafe { + /// assert_eq!(*x.downcast_unchecked::(), 1); + /// } + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + /// + /// # Safety + /// + /// The contained value must be of type `T`. Calling this method + /// with the incorrect type is *undefined behavior*. + /// + /// [`downcast`]: Self::downcast + #[inline] + pub unsafe fn downcast_unchecked(self) -> TryBox { + debug_assert!(self.is::()); + unsafe { + let (raw, alloc): (*mut (dyn Any + Send), _) = TryBox::into_raw_with_allocator(self); + TryBox::from_raw_in(raw as *mut T, alloc) + } + } +} + +impl TryBox { + /// Attempt to downcast the box to a concrete type. 
+ /// + /// # Examples + /// + /// ``` + /// use std::alloc::System; + /// use std::any::Any; + /// use svsm::alloc::boxed::TryBox; + /// use svsm::trybox_upcast; + /// + /// fn print_if_string(value: TryBox) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// let my_string = "Hello World".to_string(); + /// print_if_string(trybox_upcast!( + /// TryBox::try_new_in(my_string, System)?, + /// Any + Send + Sync + /// )); + /// print_if_string(trybox_upcast!( + /// TryBox::try_new_in(0i8, System)?, + /// Any + Send + Sync + /// )); + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + #[inline] + pub fn downcast(self) -> Result, Self> { + if self.is::() { + unsafe { Ok(self.downcast_unchecked::()) } + } else { + Err(self) + } + } + + /// Downcasts the box to a concrete type. + /// + /// For a safe alternative see [`downcast`]. + /// + /// # Examples + /// + /// ``` + /// use std::alloc::System; + /// use std::any::Any; + /// use svsm::alloc::boxed::TryBox; + /// use svsm::trybox_upcast; + /// + /// let x = trybox_upcast!(TryBox::try_new_in(1_usize, System)?, Any + Send + Sync); + /// + /// unsafe { + /// assert_eq!(*x.downcast_unchecked::(), 1); + /// } + /// # Ok::<(), svsm::alloc::TryAllocError>(()) + /// ``` + /// + /// # Safety + /// + /// The contained value must be of type `T`. Calling this method + /// with the incorrect type is *undefined behavior*. + /// + /// [`downcast`]: Self::downcast + #[inline] + pub unsafe fn downcast_unchecked(self) -> TryBox { + debug_assert!(self.is::()); + unsafe { + let (raw, alloc): (*mut (dyn Any + Send + Sync), _) = + TryBox::into_raw_with_allocator(self); + TryBox::from_raw_in(raw as *mut T, alloc) + } + } +} + +impl fmt::Display for TryBox { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +impl fmt::Debug for TryBox { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl fmt::Pointer for TryBox { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // It's not possible to extract the inner Uniq directly from the Box, + // instead we cast it to a *const which aliases the Unique + let ptr: *const T = &**self; + fmt::Pointer::fmt(&ptr, f) + } +} + +impl Deref for TryBox { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.0.as_ptr() } + } +} + +impl DerefMut for TryBox { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.0.as_ptr() } + } +} + +impl borrow::Borrow for TryBox { + fn borrow(&self) -> &T { + unsafe { &*self.0.as_ptr() } + } +} + +impl borrow::BorrowMut for TryBox { + fn borrow_mut(&mut self) -> &mut T { + unsafe { &mut *self.0.as_ptr() } + } +} + +impl AsRef for TryBox { + fn as_ref(&self) -> &T { + unsafe { &*self.0.as_ptr() } + } +} + +impl AsMut for TryBox { + fn as_mut(&mut self) -> &mut T { + unsafe { &mut *self.0.as_ptr() } + } +} + +/* Nota bene + * + * We could have chosen not to add this impl, and instead have written a + * function of Pin> to Pin. Such a function would not be sound, + * because Box implements Unpin even when T does not, as a result of + * this impl. + * + * We chose this API instead of the alternative for a few reasons: + * - Logically, it is helpful to understand pinning in regard to the + * memory region being pointed to. 
For this reason none of the + * standard library pointer types support projecting through a pin + * (Box is the only pointer type in std for which this would be + * safe.) + * - It is in practice very useful to have Box be unconditionally + * Unpin because of trait objects, for which the structural auto + * trait functionality does not apply (e.g., Box would + * otherwise not be Unpin). + * + * Another type with the same semantics as Box but only a conditional + * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and + * could have a method to project a Pin from it. + */ +impl Unpin for TryBox where A: 'static {} diff --git a/kernel/src/alloc/mod.rs b/kernel/src/alloc/mod.rs new file mode 100644 index 000000000..1f2b273a7 --- /dev/null +++ b/kernel/src/alloc/mod.rs @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (C) 2023 SUSE +// +// Author: Carlos López + +//! An adapted version of the upstream Rust alloc crate with a stabilized allocator API. + +use core::alloc::{Layout, LayoutError}; +use core::ptr::{self, NonNull}; + +pub mod boxed; +mod unique; + +/// A stable version of [`AllocError`](core::alloc::AllocError). +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum TryAllocError { + OutOfMemory, + ZeroSized, + Layout(LayoutError), + CapacityOverflow, + Internal, +} + +impl From for TryAllocError { + fn from(err: LayoutError) -> Self { + Self::Layout(err) + } +} + +/// A stable version of the [`Allocator`](core::alloc::Allocator) trait. +/// +/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of +/// data described via [`Layout`][]. +/// +/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because having +/// an allocator like `MyAlloc([u8; N])` cannot be moved, without updating the pointers to the +/// allocated memory. +/// +/// Unlike [`GlobalAlloc`], zero-sized allocations are allowed in `Allocator`. If an underlying +/// allocator does not support this (like jemalloc) or return a null pointer (such as +/// `libc::malloc`), this must be caught by the implementation. +/// +/// ### Currently allocated memory +/// +/// Some of the methods require that a memory block be *currently allocated* via an allocator. This +/// means that: +/// +/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or +/// [`shrink`], and +/// +/// * the memory block has not been subsequently deallocated, where blocks are either deallocated +/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or +/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer +/// remains valid. +/// +/// [`allocate`]: Allocator::allocate +/// [`grow`]: Allocator::grow +/// [`shrink`]: Allocator::shrink +/// [`deallocate`]: Allocator::deallocate +/// [`GlobalAlloc`]: core::alloc::GlobalAlloc +/// +/// ### Memory fitting +/// +/// Some of the methods require that a layout *fit* a memory block. 
+/// What it means for a layout to "fit" a memory block (or equivalently, for a
+/// memory block to "fit" a layout) is that the following conditions must hold:
+///
+/// * The block must be allocated with the same alignment as [`layout.align()`], and
+///
+/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
+///   - `min` is the size of the layout most recently used to allocate the block, and
+///   - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
+///
+/// [`layout.align()`]: Layout::align
+/// [`layout.size()`]: Layout::size
+///
+/// # Safety
+///
+/// * Memory blocks returned from an allocator that are [*currently allocated*] must point to
+///   valid memory and retain their validity while they are [*currently allocated*] and at
+///   least one of the instance or its clones has not been dropped,
+///
+/// * copying, cloning, or moving the allocator must not invalidate memory blocks returned from this
+///   allocator. A copied or cloned allocator must behave like the same allocator, and
+///
+/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
+///   method of the allocator.
+///
+/// [*currently allocated*]: #currently-allocated-memory
+pub unsafe trait Allocator {
+    /// Attempts to allocate a block of memory.
+    ///
+    /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment
+    /// guarantees of `layout`.
+    ///
+    /// The returned block may have a larger size than specified by `layout.size()`, and may or may
+    /// not have its contents initialized.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
+    /// allocator's size or alignment constraints.
+    ///
+    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, TryAllocError>;
+
+    /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
+    ///
+    /// # Errors
+    ///
+    /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
+    /// allocator's size or alignment constraints.
+    ///
+    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, TryAllocError> {
+        let ptr = self.allocate(layout)?;
+        // SAFETY: `allocate` returns a valid memory block
+        unsafe { ptr.as_ptr().cast::<u8>().write_bytes(0, ptr.len()) };
+        Ok(ptr)
+    }
+
+    /// Deallocates the memory referenced by `ptr`.
+    ///
+    /// # Safety
+    ///
+    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
+    /// * `layout` must [*fit*] that block of memory.
+    ///
+    /// [*currently allocated*]: #currently-allocated-memory
+    /// [*fit*]: #memory-fitting
+    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
+
+    /// Attempts to extend the memory block.
+    ///
+    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the
+    /// allocated memory. The pointer is suitable for holding data described by `new_layout`.
To accomplish + /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout. + /// + /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been + /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the + /// allocation was grown in-place. The newly returned pointer is the only valid pointer + /// for accessing this memory now. + /// + /// If this method returns `Err`, then ownership of the memory block has not been transferred to + /// this allocator, and the contents of the memory block are unaltered. + /// + /// # Safety + /// + /// * `ptr` must denote a block of memory currently allocated via this allocator. + /// * `old_layout` must fit that block of memory (The `new_layout` argument need not fit it.). + /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. + /// + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. + /// + /// [*currently allocated*]: #currently-allocated-memory + /// [*fit*]: #memory-fitting + /// + /// # Errors + /// + /// Returns `Err` if the new layout does not meet the allocator's size and alignment + /// constraints of the allocator, or if growing otherwise fails. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) + unsafe fn grow( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, TryAllocError> { + debug_assert!( + new_layout.size() >= old_layout.size(), + "`new_layout.size()` must be greater than or equal to `old_layout.size()`" + ); + + let new_ptr = self.allocate(new_layout)?; + + // SAFETY: because `new_layout.size()` must be greater than or equal to + // `old_layout.size()`, both the old and new memory allocation are valid for reads and + // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet + // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is + // safe. The safety contract for `dealloc` must be upheld by the caller. + unsafe { + ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size()); + self.deallocate(ptr, old_layout); + } + + Ok(new_ptr) + } + + /// Behaves like `grow`, but also ensures that the new contents are set to zero before being + /// returned. + /// + /// The memory block will contain the following contents after a successful call to + /// `grow_zeroed`: + /// * Bytes `0..old_layout.size()` are preserved from the original allocation. + /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on + /// the allocator implementation. `old_size` refers to the size of the memory block prior + /// to the `grow_zeroed` call, which may be larger than the size that was originally + /// requested when it was allocated. + /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory + /// block returned by the `grow_zeroed` call. + /// + /// # Safety + /// + /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. + /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). + /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. 
+ /// + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. + /// + /// [*currently allocated*]: #currently-allocated-memory + /// [*fit*]: #memory-fitting + /// + /// # Errors + /// + /// Returns `Err` if the new layout does not meet the allocator's size and alignment + /// constraints of the allocator, or if growing otherwise fails. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) + unsafe fn grow_zeroed( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, TryAllocError> { + debug_assert!( + new_layout.size() >= old_layout.size(), + "`new_layout.size()` must be greater than or equal to `old_layout.size()`" + ); + + let new_ptr = self.allocate_zeroed(new_layout)?; + + // SAFETY: because `new_layout.size()` must be greater than or equal to + // `old_layout.size()`, both the old and new memory allocation are valid for reads and + // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet + // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is + // safe. The safety contract for `dealloc` must be upheld by the caller. + unsafe { + ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size()); + self.deallocate(ptr, old_layout); + } + + Ok(new_ptr) + } + + /// Attempts to shrink the memory block. + /// + /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated + /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish + /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout. + /// + /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been + /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the + /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer + /// for accessing this memory now. + /// + /// If this method returns `Err`, then ownership of the memory block has not been transferred to + /// this allocator, and the contents of the memory block are unaltered. + /// + /// # Safety + /// + /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. + /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). + /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`. + /// + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. + /// + /// [*currently allocated*]: #currently-allocated-memory + /// [*fit*]: #memory-fitting + /// + /// # Errors + /// + /// Returns `Err` if the new layout does not meet the allocator's size and alignment + /// constraints of the allocator, or if shrinking otherwise fails. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) 
+ /// + /// Clients wishing to abort computation in response to an allocation error are encouraged to + /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn shrink( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, TryAllocError> { + debug_assert!( + new_layout.size() <= old_layout.size(), + "`new_layout.size()` must be smaller than or equal to `old_layout.size()`" + ); + + let new_ptr = self.allocate(new_layout)?; + + // SAFETY: because `new_layout.size()` must be lower than or equal to + // `old_layout.size()`, both the old and new memory allocation are valid for reads and + // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet + // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is + // safe. The safety contract for `dealloc` must be upheld by the caller. + unsafe { + ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_layout.size()); + self.deallocate(ptr, old_layout); + } + + Ok(new_ptr) + } + + /// Creates a "by reference" adapter for this instance of `Allocator`. + /// + /// The returned adapter also implements `Allocator` and will simply borrow this. + #[inline(always)] + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } +} + +unsafe impl Allocator for &A +where + A: Allocator + ?Sized, +{ + #[inline] + fn allocate(&self, layout: Layout) -> Result, TryAllocError> { + (**self).allocate(layout) + } + + #[inline] + fn allocate_zeroed(&self, layout: Layout) -> Result, TryAllocError> { + (**self).allocate_zeroed(layout) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).deallocate(ptr, layout) } + } + + #[inline] + unsafe fn grow( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, TryAllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).grow(ptr, old_layout, new_layout) } + } + + #[inline] + unsafe fn grow_zeroed( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, TryAllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) } + } + + #[inline] + unsafe fn shrink( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, TryAllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).shrink(ptr, old_layout, new_layout) } + } +} + +#[cfg(not(target_os = "none"))] +extern crate std; +#[cfg(not(target_os = "none"))] +use core::alloc::GlobalAlloc; +#[cfg(not(target_os = "none"))] +unsafe impl Allocator for std::alloc::System { + fn allocate(&self, layout: Layout) -> Result, TryAllocError> { + match layout.size() { + 0 => Ok(NonNull::slice_from_raw_parts(NonNull::dangling(), 0)), + size => { + // SAFETY: size is nonzero + let raw_ptr = unsafe { self.alloc(layout) }; + let ptr = NonNull::new(raw_ptr).ok_or(TryAllocError::OutOfMemory)?; + Ok(NonNull::slice_from_raw_parts(ptr, size)) + } + } + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + if layout.size() != 0 { + // SAFETY: `layout` is non-zero in size, + // other conditions must be upheld by the caller + self.dealloc(ptr.as_ptr(), layout) + } + } +} diff --git a/kernel/src/alloc/unique.rs 
b/kernel/src/alloc/unique.rs new file mode 100644 index 000000000..2202cc039 --- /dev/null +++ b/kernel/src/alloc/unique.rs @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (C) 2023 SUSE +// +// Author: Carlos López + +use core::convert::From; +use core::fmt; +use core::marker::PhantomData; +use core::ptr::NonNull; + +/// A wrapper around a raw non-null `*mut T` that indicates that the possessor +/// of this wrapper owns the referent. Useful for building abstractions like +/// `Box`, `Vec`, `String`, and `HashMap`. +/// +/// Unlike `*mut T`, `Unique` behaves "as if" it were an instance of `T`. +/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies +/// the kind of strong aliasing guarantees an instance of `T` can expect: +/// the referent of the pointer should not be modified without a unique path to +/// its owning Unique. +/// +/// If you're uncertain of whether it's correct to use `Unique` for your purposes, +/// consider using `NonNull`, which has weaker semantics. +/// +/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer +/// is never dereferenced. This is so that enums may use this forbidden value +/// as a discriminant -- `Option>` has the same size as `Unique`. +/// However the pointer may still dangle if it isn't dereferenced. +/// +/// Unlike `*mut T`, `Unique` is covariant over `T`. This should always be correct +/// for any type which upholds Unique's aliasing requirements. +#[repr(transparent)] +// Lang item used experimentally by Miri to define the semantics of `Unique`. +pub struct Unique { + pointer: NonNull, + // NOTE: this marker has no consequences for variance, but is necessary + // for dropck to understand that we logically own a `T`. + // + // For details, see: + // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data + _marker: PhantomData, +} + +/// `Unique` pointers are `Send` if `T` is `Send` because the data they +/// reference is unaliased. Note that this aliasing invariant is +/// unenforced by the type system; the abstraction using the +/// `Unique` must enforce it. +unsafe impl Send for Unique {} + +/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they +/// reference is unaliased. Note that this aliasing invariant is +/// unenforced by the type system; the abstraction using the +/// `Unique` must enforce it. +unsafe impl Sync for Unique {} + +impl Unique { + /// Creates a new `Unique` that is dangling, but well-aligned. + /// + /// This is useful for initializing types which lazily allocate, like + /// `Vec::new` does. + /// + /// Note that the pointer value may potentially represent a valid pointer to + /// a `T`, which means this must not be used as a "not yet initialized" + /// sentinel value. Types that lazily allocate must track initialization by + /// some other means. + #[must_use] + #[inline] + pub const fn dangling() -> Self { + // FIXME(const-hack) replace with `From` + Unique { + pointer: NonNull::dangling(), + _marker: PhantomData, + } + } +} + +impl Unique { + /// Creates a new `Unique`. + /// + /// # Safety + /// + /// `ptr` must be non-null. + #[inline] + pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { + // SAFETY: the caller must guarantee that `ptr` is non-null. + unsafe { + Unique { + pointer: NonNull::new_unchecked(ptr), + _marker: PhantomData, + } + } + } + + /// Creates a new `Unique` if `ptr` is non-null. 
+ #[inline] + pub fn new(ptr: *mut T) -> Option { + NonNull::new(ptr).map(|pointer| Unique { + pointer, + _marker: PhantomData, + }) + } + + /// Acquires the underlying `*mut` pointer. + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub const fn as_ptr(self) -> *mut T { + self.pointer.as_ptr() + } + + /// Dereferences the content. + /// + /// The resulting lifetime is bound to self so this behaves "as if" + /// it were actually an instance of T that is getting borrowed. If a longer + /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. + #[must_use] + #[inline] + pub const unsafe fn as_ref(&self) -> &T { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a reference. + unsafe { self.pointer.as_ref() } + } + + /// Mutably dereferences the content. + /// + /// The resulting lifetime is bound to self so this behaves "as if" + /// it were actually an instance of T that is getting borrowed. If a longer + /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. + #[must_use] + #[inline] + pub unsafe fn as_mut(&mut self) -> &mut T { + // SAFETY: the caller must guarantee that `self` meets all the + // requirements for a mutable reference. + unsafe { self.pointer.as_mut() } + } + + /// Casts to a pointer of another type. + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub const fn cast(self) -> Unique { + // FIXME(const-hack): replace with `From` + // SAFETY: is `NonNull` + unsafe { Unique::new_unchecked(self.pointer.cast().as_ptr()) } + } +} + +impl Clone for Unique { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl Copy for Unique {} + +impl fmt::Debug for Unique { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + +impl fmt::Pointer for Unique { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + +impl From<&mut T> for Unique { + /// Converts a `&mut T` to a `Unique`. + /// + /// This conversion is infallible since references cannot be null. + #[inline] + fn from(reference: &mut T) -> Self { + Self::from(NonNull::from(reference)) + } +} + +impl From> for Unique { + /// Converts a `NonNull` to a `Unique`. + /// + /// This conversion is infallible since `NonNull` cannot be null. + #[inline] + fn from(pointer: NonNull) -> Self { + Unique { + pointer, + _marker: PhantomData, + } + } +} + +impl From> for NonNull { + #[inline] + fn from(unique: Unique) -> Self { + unique.pointer + } +} diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 979c65f5e..57cfa58c8 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -12,6 +12,7 @@ pub mod acpi; pub mod address; +pub mod alloc; pub mod config; pub mod console; pub mod cpu; From 7659fa6ef4c66e3722eb8569cf0c474b224a676c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20L=C3=B3pez?= Date: Thu, 4 Jan 2024 12:29:11 +0100 Subject: [PATCH 2/9] mm/alloc: implement Allocator for SvsmAllocator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to be used along with the new TryBox type, an allocator must implement the Allocator trait, so implement it for the current global allocator. 
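
To illustrate, this is the kind of fallible allocation the trait impl
enables (a sketch; the generic helper below is illustrative and not
part of this patch):

```rust
use svsm::alloc::{boxed::TryBox, Allocator, TryAllocError};

// Illustrative helper: any type implementing Allocator works here,
// including SvsmAllocator after this patch. Allocation failures
// surface as Err instead of aborting.
fn sketch<A: Allocator>(alloc: A) -> Result<(), TryAllocError> {
    let boxed = TryBox::try_new_in(0u64, alloc)?;
    assert_eq!(*boxed, 0);
    Ok(())
}
```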
Signed-off-by: Carlos López
---
 kernel/src/mm/alloc.rs | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/kernel/src/mm/alloc.rs b/kernel/src/mm/alloc.rs
index 2d2d1b358..d964f2104 100644
--- a/kernel/src/mm/alloc.rs
+++ b/kernel/src/mm/alloc.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel
 
 use crate::address::{Address, PhysAddr, VirtAddr};
+use crate::alloc::{Allocator, TryAllocError};
 use crate::error::SvsmError;
 use crate::locking::SpinLock;
 use crate::mm::virt_to_phys;
@@ -12,7 +13,7 @@ use crate::types::{PAGE_SHIFT, PAGE_SIZE};
 use crate::utils::{align_down, align_up, zero_mem_region};
 use core::alloc::{GlobalAlloc, Layout};
 use core::mem::size_of;
-use core::ptr;
+use core::ptr::{self, NonNull};
 
 #[cfg(any(test, fuzzing))]
 use crate::locking::LockGuard;
@@ -1553,6 +1554,30 @@ unsafe impl GlobalAlloc for SvsmAllocator {
     }
 }
 
+unsafe impl Allocator for SvsmAllocator {
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, TryAllocError> {
+        match layout.size() {
+            0 => Ok(NonNull::slice_from_raw_parts(NonNull::dangling(), 0)),
+            size => {
+                // SAFETY: size is nonzero
+                let raw_ptr = unsafe { self.alloc(layout) };
+                // FIXME: find a way to return a more correct error here. At
+                // some point we must reconcile AllocError and TryAllocError.
+                let ptr = NonNull::new(raw_ptr).ok_or(TryAllocError::OutOfMemory)?;
+                Ok(NonNull::slice_from_raw_parts(ptr, size))
+            }
+        }
+    }
+
+    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+        if layout.size() != 0 {
+            // SAFETY: `layout` is non-zero in size,
+            // other conditions must be upheld by the caller
+            self.dealloc(ptr.as_ptr(), layout)
+        }
+    }
+}
+
 #[cfg_attr(any(target_os = "none"), global_allocator)]
 #[cfg_attr(not(target_os = "none"), allow(dead_code))]
 static ALLOCATOR: SvsmAllocator = SvsmAllocator::new();

From 54a1cdf7bae35adb7a8b710a07ce2bf032fa69d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Mon, 15 Jan 2024 13:24:56 +0100
Subject: [PATCH 3/9] error: add SvsmError::TryAlloc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a new variant to hold a TryAllocError from the alloc submodule.
This new error does not implement Copy, so SvsmError cannot implement
it either.

Signed-off-by: Carlos López
---
 kernel/src/error.rs            | 5 ++++-
 kernel/src/greq/driver.rs      | 2 +-
 kernel/src/protocols/errors.rs | 2 +-
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/kernel/src/error.rs b/kernel/src/error.rs
index 335575d14..7153598d2 100644
--- a/kernel/src/error.rs
+++ b/kernel/src/error.rs
@@ -4,6 +4,7 @@
 //
 // Author: Carlos López
 
+use crate::alloc::TryAllocError;
 use crate::cpu::vc::VcError;
 use crate::fs::FsError;
 use crate::fw_cfg::FwCfgError;
@@ -18,7 +19,7 @@ use crate::task::TaskError;
 // containing a leaf error type, usually the one corresponding to
 // that module. We always provide a way to convert a leaf error into
 // a SvsmError via the From trait at the module level.
-#[derive(Clone, Copy, Debug)] +#[derive(Clone, Debug)] pub enum SvsmError { // Errors related to GHCB Ghcb(GhcbError), @@ -46,6 +47,8 @@ pub enum SvsmError { FileSystem(FsError), // Task management errors, Task(TaskError), + // Smart pointer errors + TryAlloc(TryAllocError), // Errors from #VC handler Vc(VcError), } diff --git a/kernel/src/greq/driver.rs b/kernel/src/greq/driver.rs index e5b1c2662..5dd8db248 100644 --- a/kernel/src/greq/driver.rs +++ b/kernel/src/greq/driver.rs @@ -211,7 +211,7 @@ impl SnpGuestRequestDriver { .staging .decrypt_get(msg_type, msg_seqno, &vmpck0, buffer); - if let Err(e) = result { + if let Err(ref e) = result { match e { // The buffer provided is too small to store the unwrapped response. // There is no need to clear the VMPCK0, just report it as invalid parameter. diff --git a/kernel/src/protocols/errors.rs b/kernel/src/protocols/errors.rs index 2a22e5ebb..185e2be28 100644 --- a/kernel/src/protocols/errors.rs +++ b/kernel/src/protocols/errors.rs @@ -38,7 +38,7 @@ impl From for u64 { } } -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub enum SvsmReqError { RequestError(SvsmResultCode), FatalError(SvsmError), From 918ff1712648c7d7101abbc8093d23a2c5789df6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20L=C3=B3pez?= Date: Wed, 24 Jan 2024 12:07:33 +0100 Subject: [PATCH 4/9] mm: add GlobalBox implementation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new type, GlobalBox, which wraps around the TryBox type but uses the global SvsmAllocator transparently. The new type implements AsRef and Into, so regular TryBox methods can also be used on the new type. Signed-off-by: Carlos López --- kernel/src/mm/alloc.rs | 2 +- kernel/src/mm/boxed.rs | 214 +++++++++++++++++++++++++++++++++++++++++ kernel/src/mm/mod.rs | 2 + 3 files changed, 217 insertions(+), 1 deletion(-) create mode 100644 kernel/src/mm/boxed.rs diff --git a/kernel/src/mm/alloc.rs b/kernel/src/mm/alloc.rs index d964f2104..7e71db173 100644 --- a/kernel/src/mm/alloc.rs +++ b/kernel/src/mm/alloc.rs @@ -1580,7 +1580,7 @@ unsafe impl Allocator for SvsmAllocator { #[cfg_attr(any(target_os = "none"), global_allocator)] #[cfg_attr(not(target_os = "none"), allow(dead_code))] -static ALLOCATOR: SvsmAllocator = SvsmAllocator::new(); +pub(super) static ALLOCATOR: SvsmAllocator = SvsmAllocator::new(); /// Initializes the root memory region with the specified physical start /// address, virtual start address, and page count. diff --git a/kernel/src/mm/boxed.rs b/kernel/src/mm/boxed.rs new file mode 100644 index 000000000..360cb79b3 --- /dev/null +++ b/kernel/src/mm/boxed.rs @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 +// +// Copyright (c) 2023 SUSE LLC +// +// Author: Carlos López + +use super::alloc::{SvsmAllocator, ALLOCATOR}; +use crate::alloc::boxed::TryBox; +use crate::alloc::TryAllocError; +use crate::error::SvsmError; +use core::mem::MaybeUninit; +use core::ops::{Deref, DerefMut}; + +impl From for SvsmError { + fn from(err: TryAllocError) -> Self { + SvsmError::TryAlloc(err) + } +} + +/// See the documentation for [`trybox_upcast`](crate::trybox_upcast). +#[macro_export] +macro_rules! globalbox_upcast { + ($boxed:expr, $bound:tt $(+ $others:tt)*) => {{ + let ptr = GlobalBox::into_raw($boxed); + unsafe { GlobalBox::from_raw(ptr as *mut (dyn $bound $(+ $others)*)) } + }} +} + +/// A [`TryBox`] wrapper which uses the global memory allocator. 
+#[derive(Debug)]
+pub struct GlobalBox<T: ?Sized>(TryBox<T, &'static SvsmAllocator>);
+
+impl<T> GlobalBox<T> {
+    /// See the documentation for [`TryBox::try_new_in()`].
+    #[inline]
+    pub fn try_new(val: T) -> Result<Self, SvsmError> {
+        let inner = TryBox::try_new_in(val, &ALLOCATOR)?;
+        Ok(Self(inner))
+    }
+
+    /// See the documentation for [`TryBox::try_new_uninit_in()`].
+    #[inline]
+    pub fn try_new_uninit() -> Result<GlobalBox<MaybeUninit<T>>, SvsmError> {
+        let inner = TryBox::try_new_uninit_in(&ALLOCATOR)?;
+        Ok(GlobalBox(inner))
+    }
+
+    /// See the documentation for [`TryBox::try_new_zeroed_in()`].
+    #[inline]
+    pub fn try_new_zeroed() -> Result<GlobalBox<MaybeUninit<T>>, SvsmError> {
+        let inner = TryBox::try_new_zeroed_in(&ALLOCATOR)?;
+        Ok(GlobalBox(inner))
+    }
+
+    /// See the documentation for [`TryBox::into_inner()`].
+    #[inline]
+    pub fn into_inner(self) -> T {
+        TryBox::into_inner(self.0)
+    }
+}
+
+impl<T: ?Sized> GlobalBox<T> {
+    /// # Safety
+    ///
+    /// See the safety requirements for [`TryBox::from_raw_in()`].
+    #[inline]
+    pub unsafe fn from_raw(raw: *mut T) -> Self {
+        Self(TryBox::from_raw_in(raw, &ALLOCATOR))
+    }
+
+    #[inline]
+    /// See the documentation for [`TryBox::into_raw`].
+    pub fn into_raw(b: Self) -> *mut T {
+        TryBox::into_raw(b.0)
+    }
+}
+
+impl<T> GlobalBox<MaybeUninit<T>> {
+    /// # Safety
+    ///
+    /// See safety requirements for [`TryBox::assume_init()`].
+    #[inline]
+    pub unsafe fn assume_init(self) -> GlobalBox<T> {
+        GlobalBox(TryBox::assume_init(self.0))
+    }
+}
+
+impl<T: Default> GlobalBox<T> {
+    /// Allocates memory in the global allocator and places the default
+    /// value for `T` into it.
+    #[inline]
+    pub fn try_default() -> Result<Self, TryAllocError> {
+        TryBox::try_default_in(&ALLOCATOR).map(Self)
+    }
+}
+
+impl<T: ?Sized> From<TryBox<T, &'static SvsmAllocator>> for GlobalBox<T> {
+    fn from(boxed: TryBox<T, &'static SvsmAllocator>) -> Self {
+        Self(boxed)
+    }
+}
+
+impl<T: ?Sized> From<GlobalBox<T>> for TryBox<T, &'static SvsmAllocator> {
+    fn from(boxed: GlobalBox<T>) -> Self {
+        boxed.0
+    }
+}
+
+impl<T: ?Sized> AsRef<TryBox<T, &'static SvsmAllocator>> for GlobalBox<T> {
+    fn as_ref(&self) -> &TryBox<T, &'static SvsmAllocator> {
+        &self.0
+    }
+}
+
+impl<T: ?Sized> AsMut<TryBox<T, &'static SvsmAllocator>> for GlobalBox<T> {
+    fn as_mut(&mut self) -> &mut TryBox<T, &'static SvsmAllocator> {
+        &mut self.0
+    }
+}
+
+impl<T: ?Sized> AsRef<T> for GlobalBox<T> {
+    fn as_ref(&self) -> &T {
+        TryBox::as_ref(&self.0)
+    }
+}
+
+impl<T: ?Sized> AsMut<T> for GlobalBox<T> {
+    fn as_mut(&mut self) -> &mut T {
+        TryBox::as_mut(&mut self.0)
+    }
+}
+
+impl<T: ?Sized> Deref for GlobalBox<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        TryBox::deref(self.as_ref())
+    }
+}
+
+impl<T: ?Sized> DerefMut for GlobalBox<T> {
+    fn deref_mut(&mut self) -> &mut T {
+        TryBox::deref_mut(self.as_mut())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[cfg(not(test_in_svsm))]
+    extern crate std;
+    #[cfg(test_in_svsm)]
+    use super::ALLOCATOR as Alloc;
+    use super::*;
+    #[cfg(not(test_in_svsm))]
+    use std::alloc::System as Alloc;
+
+    #[test]
+    fn box_try_new() {
+        let obj = TryBox::try_new_in(5, &Alloc).unwrap();
+        assert_eq!(*obj, 5);
+    }
+
+    #[test]
+    fn box_try_uninit() {
+        let mut obj = TryBox::<u32, _>::try_new_uninit_in(&Alloc).unwrap();
+        // SAFETY: TryBox owns valid memory. Memory is initialized before use.
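+        // (Illustrative note: the `write()` below fully initializes the
+        // value, so the subsequent `assume_init()` upholds that contract.)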
+        let init = unsafe {
+            obj.as_mut_ptr().write(5);
+            obj.assume_init()
+        };
+        assert_eq!(*init, 5);
+    }
+
+    #[test]
+    fn box_try_uninit_write() {
+        let obj = TryBox::<u32, _>::try_new_uninit_in(&Alloc).unwrap();
+        let init = TryBox::write(obj, 7);
+        assert_eq!(*init, 7);
+    }
+
+    #[test]
+    fn box_try_zeroed() {
+        let obj = TryBox::<u32, _>::try_new_zeroed_in(&Alloc).unwrap();
+        // SAFETY: memory is initialized to zero, which is valid for u32
+        let init = unsafe { obj.assume_init() };
+        assert_eq!(*init, 0);
+    }
+
+    #[test]
+    fn box_nested_deref() {
+        let inner = TryBox::try_new_in([13; 32], &Alloc).unwrap();
+        {
+            let outer = TryBox::try_new_in(inner, &Alloc).unwrap();
+            assert_eq!(**outer, [13; 32]);
+        }
+    }
+
+    #[test]
+    fn box_try_clone() {
+        let first = TryBox::try_new_in([13; 32], &Alloc).unwrap();
+        let second = first.try_clone().unwrap();
+        drop(first);
+        assert_eq!(*second, [13; 32]);
+    }
+
+    #[test]
+    fn box_try_clone_mut() {
+        let mut first = TryBox::try_new_in([13; 32], &Alloc).unwrap();
+        let second = first.try_clone().unwrap();
+        first.fill(14);
+        assert_eq!(*second, [13; 32]);
+        assert_eq!(*first, [14; 32]);
+    }
+}
diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs
index 9721c532d..fd2ee6c16 100644
--- a/kernel/src/mm/mod.rs
+++ b/kernel/src/mm/mod.rs
@@ -6,6 +6,7 @@

 pub mod address_space;
 pub mod alloc;
+mod boxed;
 pub mod guestmem;
 pub mod memory;
 pub mod page_visibility;
@@ -17,6 +18,7 @@ pub mod virtualrange;
 pub mod vm;

 pub use address_space::*;
+pub use boxed::GlobalBox;
 pub use guestmem::GuestPtr;
 pub use memory::{valid_phys_address, writable_phys_addr};
 pub use ptguards::*;

From 68f2dafb69bd16719eaba452a0f9ad7538d3f056 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Wed, 24 Jan 2024 12:31:18 +0100
Subject: [PATCH 5/9] mm/pagetable: replace Box with GlobalBox
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allocate PageTablePart structs with the new fallible-allocation-aware
GlobalBox type.
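
PageTablePart::new() thus becomes fallible, returning a Result, and
callers propagate allocation failures with `?` instead of panicking on
out-of-memory. A sketch of the new call pattern (illustrative only):

    let part = PageTablePart::new(start)?; // Err(SvsmError) if the heap is exhausted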
Signed-off-by: Carlos López
---
 kernel/src/mm/pagetable.rs | 14 ++++++--------
 kernel/src/mm/vm/range.rs  |  2 +-
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/kernel/src/mm/pagetable.rs b/kernel/src/mm/pagetable.rs
index 61aadf98f..60ef321fe 100644
--- a/kernel/src/mm/pagetable.rs
+++ b/kernel/src/mm/pagetable.rs
@@ -12,6 +12,7 @@ use crate::cpu::flush_tlb_global_sync;
 use crate::error::SvsmError;
 use crate::locking::{LockGuard, SpinLock};
 use crate::mm::alloc::{allocate_zeroed_page, free_page};
+use crate::mm::GlobalBox;
 use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED};
 use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
 use crate::utils::immut_after_init::ImmutAfterInitCell;
@@ -20,9 +21,6 @@ use bitflags::bitflags;
 use core::ops::{Deref, DerefMut, Index, IndexMut};
 use core::{cmp, ptr};

-extern crate alloc;
-use alloc::boxed::Box;
-
 const ENTRY_COUNT: usize = 512;
 static ENCRYPT_MASK: ImmutAfterInitCell<usize> = ImmutAfterInitCell::new(0);
 static MAX_PHYS_ADDR: ImmutAfterInitCell<u64> = ImmutAfterInitCell::uninit();
@@ -899,7 +897,7 @@ impl Drop for RawPageTablePart {
 #[derive(Debug)]
 pub struct PageTablePart {
     /// The root of the page-table sub-tree
-    raw: Box<RawPageTablePart>,
+    raw: GlobalBox<RawPageTablePart>,
     /// The top-level index this PageTablePart is populated at
     idx: usize,
 }
@@ -914,11 +912,11 @@ impl PageTablePart {
     /// # Returns
     ///
     /// A new instance of PageTablePart
-    pub fn new(start: VirtAddr) -> Self {
-        PageTablePart {
-            raw: Box::<RawPageTablePart>::default(),
+    pub fn new(start: VirtAddr) -> Result<Self, SvsmError> {
+        Ok(PageTablePart {
+            raw: GlobalBox::<RawPageTablePart>::try_default()?,
             idx: PageTable::index::<3>(start),
-        }
+        })
     }

     /// Request PageTable index to populate this instance to
diff --git a/kernel/src/mm/vm/range.rs b/kernel/src/mm/vm/range.rs
index f898b6f8d..f4ae48425 100644
--- a/kernel/src/mm/vm/range.rs
+++ b/kernel/src/mm/vm/range.rs
@@ -96,7 +96,7 @@ impl VMR {
         let mut vec = self.pgtbl_parts.lock_write();

         for idx in 0..count {
-            vec.push(PageTablePart::new(start + (idx * VMR_GRANULE)));
+            vec.push(PageTablePart::new(start + (idx * VMR_GRANULE))?);
         }

         Ok(())

From 8265a896c08ebe60f0c1dd656bd0542c8bce473c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Wed, 24 Jan 2024 13:03:34 +0100
Subject: [PATCH 6/9] greq: replace Box with GlobalBox
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use the new fallible-allocation-aware GlobalBox type when allocating
SNP request messages.

Signed-off-by: Carlos López
---
 kernel/src/greq/driver.rs | 19 ++++++++--------
 kernel/src/greq/msg.rs    | 46 +++++++++++----------------------------
 2 files changed, 22 insertions(+), 43 deletions(-)

diff --git a/kernel/src/greq/driver.rs b/kernel/src/greq/driver.rs
index 5dd8db248..eeba93a74 100644
--- a/kernel/src/greq/driver.rs
+++ b/kernel/src/greq/driver.rs
@@ -8,18 +8,17 @@
 //! request or response command types defined in the SEV-SNP spec, regardless if it's
 //! a regular or an extended command.
-extern crate alloc;
-
-use alloc::boxed::Box;
 use core::ptr::addr_of_mut;
 use core::{cell::OnceCell, mem::size_of};

 use crate::{
     address::VirtAddr,
+    alloc::boxed::TryBox,
     cpu::ghcb::current_ghcb,
     error::SvsmError,
     greq::msg::{SnpGuestRequestExtData, SnpGuestRequestMsg, SnpGuestRequestMsgType},
     locking::SpinLock,
+    mm::GlobalBox,
     protocols::errors::{SvsmReqError, SvsmResultCode},
     sev::{ghcb::GhcbError, secrets_page, secrets_page_mut, VMPCK_SIZE},
     types::PAGE_SHIFT,
@@ -48,14 +47,14 @@ enum SnpGuestRequestClass {
 #[derive(Debug)]
 struct SnpGuestRequestDriver {
     /// Shared page used for the `SNP_GUEST_REQUEST` request
-    request: Box<SnpGuestRequestMsg>,
+    request: GlobalBox<SnpGuestRequestMsg>,
     /// Shared page used for the `SNP_GUEST_REQUEST` response
-    response: Box<SnpGuestRequestMsg>,
+    response: GlobalBox<SnpGuestRequestMsg>,
     /// Encrypted page where we perform crypto operations
-    staging: Box<SnpGuestRequestMsg>,
+    staging: GlobalBox<SnpGuestRequestMsg>,
     /// Extended data buffer that will be provided to the hypervisor
     /// to store the SEV-SNP certificates
-    ext_data: Box<SnpGuestRequestExtData>,
+    ext_data: GlobalBox<SnpGuestRequestExtData>,
     /// Extended data size (`certs` size) provided by the user in [`super::services::get_extended_report`].
     /// It will be provided to the hypervisor.
     user_extdata_size: usize,
@@ -84,21 +83,21 @@ impl Drop for SnpGuestRequestDriver {
                 SnpGuestRequestMsg::boxed_new().expect("GREQ: failed to allocate request");
             let old_req = core::mem::replace(&mut self.request, new_req);
             log::error!("GREQ: request: failed to set page to encrypted. Memory leak!");
-            Box::leak(old_req);
+            TryBox::leak(old_req.into());
         }
         if self.response.set_encrypted().is_err() {
             let new_resp =
                 SnpGuestRequestMsg::boxed_new().expect("GREQ: failed to allocate response");
             let old_resp = core::mem::replace(&mut self.response, new_resp);
             log::error!("GREQ: response: failed to set page to encrypted. Memory leak!");
-            Box::leak(old_resp);
+            TryBox::leak(old_resp.into());
         }
         if self.ext_data.set_encrypted().is_err() {
             let new_data =
                 SnpGuestRequestExtData::boxed_new().expect("GREQ: failed to allocate ext_data");
             let old_data = core::mem::replace(&mut self.ext_data, new_data);
             log::error!("GREQ: ext_data: failed to set pages to encrypted. Memory leak!");
-            Box::leak(old_data);
+            TryBox::leak(old_data.into());
         }
     }
 }
diff --git a/kernel/src/greq/msg.rs b/kernel/src/greq/msg.rs
index eb6a19c25..dadb1f90d 100644
--- a/kernel/src/greq/msg.rs
+++ b/kernel/src/greq/msg.rs
@@ -6,12 +6,6 @@

 //! Message that carries an encrypted `SNP_GUEST_REQUEST` command in the payload

-extern crate alloc;
-
-use alloc::{
-    alloc::{alloc_zeroed, Layout},
-    boxed::Box,
-};
 use core::{
     mem::size_of,
     ptr::{addr_of, addr_of_mut},
@@ -23,7 +17,7 @@
     cpu::ghcb::current_ghcb,
     cpu::percpu::this_cpu_mut,
     crypto::aead::{Aes256Gcm, Aes256GcmTrait, AUTHTAG_SIZE, IV_SIZE},
-    mm::virt_to_phys,
+    mm::{virt_to_phys, GlobalBox},
     protocols::errors::SvsmReqError,
     sev::{ghcb::PageStateChangeOp, secrets_page::VMPCK_SIZE},
     types::{PageSize, PAGE_SIZE},
@@ -214,20 +208,12 @@ impl SnpGuestRequestMsg {
     /// # Panics
     ///
    /// Panics if the new allocation is not page aligned.
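+    ///
+    /// (This is expected to hold because this type is page-sized, and the
+    /// global allocator is assumed to return page-aligned blocks for
+    /// page-sized layouts.)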
-    pub fn boxed_new() -> Result<Box<Self>, SvsmReqError> {
-        let layout = Layout::new::<Self>();
-
-        unsafe {
-            let addr = alloc_zeroed(layout);
-            if addr.is_null() {
-                return Err(SvsmReqError::invalid_request());
-            }
-
-            assert!(VirtAddr::from(addr).is_page_aligned());
-
-            let ptr = addr.cast::<Self>();
-            Ok(Box::from_raw(ptr))
-        }
+    pub fn boxed_new() -> Result<GlobalBox<Self>, SvsmReqError> {
+        let raw = GlobalBox::<Self>::try_new_zeroed()?;
+        // SAFETY: all zeros is a valid representation for SnpGuestRequestMsg
+        let msg = unsafe { GlobalBox::assume_init(raw) };
+        assert!(VirtAddr::from(msg.as_ref() as *const Self).is_page_aligned());
+        Ok(msg)
     }

     /// Clear the C-bit (memory encryption bit) for the Self page
@@ -473,18 +459,12 @@ pub struct SnpGuestRequestExtData {
 impl SnpGuestRequestExtData {
     /// Allocate the object in the heap without going through stack as
     /// this is a large object
-    pub fn boxed_new() -> Result<Box<Self>, SvsmReqError> {
-        let layout = Layout::new::<Self>();
-        unsafe {
-            let addr = alloc_zeroed(layout);
-            if addr.is_null() {
-                return Err(SvsmReqError::invalid_request());
-            }
-            assert!(VirtAddr::from(addr).is_page_aligned());
-
-            let ptr = addr.cast::<Self>();
-            Ok(Box::from_raw(ptr))
-        }
+    pub fn boxed_new() -> Result<GlobalBox<Self>, SvsmReqError> {
+        let raw = GlobalBox::<Self>::try_new_zeroed()?;
+        // SAFETY: all zeros is a valid representation for SnpGuestRequestExtData
+        let data = unsafe { GlobalBox::assume_init(raw) };
+        assert!(VirtAddr::from(data.as_ref() as *const Self).is_page_aligned());
+        Ok(data)
     }

     /// Clear the C-bit (memory encryption bit) for the Self pages

From 54019fc37b92e97a724950fc30236343e089da28 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Wed, 31 Jan 2024 14:43:05 +0100
Subject: [PATCH 7/9] mm/vm: replace Box with GlobalBox
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace uses of Box with the new fallible-allocation-aware GlobalBox
type.

Unfortunately, replacing the Box used in the RB tree for virtual
mappings is very involved. In short, the intrusive_collections crate
provides an intrusive_adapter macro, which generates a new adapter type
that handles the insertion and deletion of any type into an intrusive
collection. Sadly, this macro is only prepared to work with the smart
pointer types in the standard library. This is done by implementing the
PointerOps trait for DefaultPointerOps<T>, where T is one of the
regular smart pointer types (Rc, Arc, Box, etc.). The macro then
generates an implementation of Adapter for the newly generated type,
which uses the selected smart pointer.

We define a substitute for DefaultPointerOps, CustomPointerOps, and
implement PointerOps for CustomPointerOps<GlobalBox<T>>. We then add a
manual implementation of Adapter for VMMAdapter, which uses GlobalBox
under the hood.
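
In sketch form (illustrative, mirroring the code added below), the
substitute boils down to:

    unsafe impl<T> PointerOps for CustomPointerOps<GlobalBox<T>> {
        type Value = T;
        type Pointer = GlobalBox<T>;
        // from_raw()/into_raw() simply round-trip the owned pointer
    }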
Signed-off-by: Carlos López
---
 kernel/src/mm/vm/mapping/api.rs | 132 +++++++++++++++++++++++++++++---
 kernel/src/mm/vm/range.rs       |   6 +-
 2 files changed, 124 insertions(+), 14 deletions(-)

diff --git a/kernel/src/mm/vm/mapping/api.rs b/kernel/src/mm/vm/mapping/api.rs
index c902b0d14..ad6ed2cdc 100644
--- a/kernel/src/mm/vm/mapping/api.rs
+++ b/kernel/src/mm/vm/mapping/api.rs
@@ -9,10 +9,15 @@ use crate::error::SvsmError;
 use crate::locking::{RWLock, ReadLockGuard, WriteLockGuard};
 use crate::mm::pagetable::PTEntryFlags;
 use crate::mm::vm::VMR;
+use crate::mm::GlobalBox;
 use crate::types::{PageSize, PAGE_SHIFT};

+use core::fmt;
+use core::marker::PhantomData;
 use intrusive_collections::rbtree::Link;
-use intrusive_collections::{intrusive_adapter, KeyAdapter};
+use intrusive_collections::{
+    container_of, offset_of, Adapter, DefaultLinkOps, KeyAdapter, LinkOps, PointerOps,
+};

 use core::ops::Range;

@@ -31,7 +36,7 @@ pub struct VMPageFaultResolution {
     pub flags: PTEntryFlags,
 }

-pub trait VirtualMapping: core::fmt::Debug {
+pub trait VirtualMapping: fmt::Debug {
     /// Request the size of the virtual memory mapping
     ///
     /// # Returns
@@ -181,15 +186,6 @@ pub struct VMM {
     mapping: Arc<Mapping>,
 }

-intrusive_adapter!(pub VMMAdapter = Box<VMM>: VMM { link: Link });
-
-impl<'a> KeyAdapter<'a> for VMMAdapter {
-    type Key = usize;
-    fn get_key(&self, node: &'a VMM) -> Self::Key {
-        node.range.start
-    }
-}
-
 impl VMM {
     /// Create a new VMM instance with at a given address and backing struct
     ///
@@ -247,3 +243,117 @@ impl VMM {
         self.mapping.clone()
     }
 }
+
+/// A simple newtype wrapper around a [`PhantomData`] used as a workaround for
+/// Rust's orphan rules, in order to implement [`PointerOps`].
+///
+/// Does a similar job as [`DefaultPointerOps`](intrusive_collections::DefaultPointerOps).
+#[derive(Debug, Clone, Copy, Default)]
+pub struct CustomPointerOps<T>(PhantomData<T>);
+
+impl<T> CustomPointerOps<T> {
+    const NEW: Self = Self(PhantomData);
+}
+
+/// An implementation of [`PointerOps`] for [`CustomPointerOps<GlobalBox<T>>`]
+/// similar to the one for [`DefaultPointerOps<Box<T>>`](intrusive_collections::DefaultPointerOps).
+unsafe impl<T> PointerOps for CustomPointerOps<GlobalBox<T>> {
+    type Value = T;
+    type Pointer = GlobalBox<T>;
+
+    #[inline]
+    unsafe fn from_raw(&self, raw: *const Self::Value) -> Self::Pointer {
+        GlobalBox::from_raw(raw as *mut _)
+    }
+
+    #[inline]
+    fn into_raw(&self, ptr: Self::Pointer) -> *const Self::Value {
+        GlobalBox::into_raw(ptr)
+    }
+}
+
+/// An adapter to insert a [`VMM`] in an intrusive collection, similar to the
+/// one generated by the [`intrusive_adapter`](intrusive_collections::intrusive_adapter)
+/// macro.
+pub struct VMMAdapter {
+    link_ops: <Link as DefaultLinkOps>::Ops,
+    pointer_ops: CustomPointerOps<GlobalBox<VMM>>,
+}
+
+#[allow(dead_code)]
+impl VMMAdapter {
+    pub const NEW: Self = VMMAdapter {
+        link_ops: <Link as DefaultLinkOps>::NEW,
+        pointer_ops: CustomPointerOps::NEW,
+    };
+
+    #[inline]
+    pub fn new() -> Self {
+        Self::NEW
+    }
+}
+
+impl Default for VMMAdapter {
+    #[inline]
+    fn default() -> Self {
+        Self::NEW
+    }
+}
+
+// Implement this manually because we have `deny(missing_debug_implementations)`
+// but `link_ops` does not implement Debug.
+impl fmt::Debug for VMMAdapter {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        f.debug_struct("VMMAdapter")
+            .field("link_ops", &"_")
+            .field("pointer_ops", &"_")
+            .finish()
+    }
+}
+
+/// Allows a [`VMM`] to be introduced in an intrusive collection. This is a
+/// manual implementation of the code generated by the
+/// [`intrusive_adapter`](intrusive_collections::intrusive_adapter) macro.
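+///
+/// Informally, the safety argument is that `get_value()` and `get_link()`
+/// must be exact inverses for any `VMM` in the collection; both derive the
+/// conversion from the offset of the `link` field within `VMM`, via
+/// `container_of!` and `offset_of!` respectively.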
+unsafe impl Adapter for VMMAdapter {
+    type LinkOps = <Link as DefaultLinkOps>::Ops;
+    type PointerOps = CustomPointerOps<GlobalBox<VMM>>;
+
+    #[inline]
+    unsafe fn get_value(
+        &self,
+        link: <Self::LinkOps as LinkOps>::LinkPtr,
+    ) -> *const <Self::PointerOps as PointerOps>::Value {
+        container_of!(link.as_ptr(), VMM, link)
+    }
+
+    #[inline]
+    unsafe fn get_link(
+        &self,
+        value: *const <Self::PointerOps as PointerOps>::Value,
+    ) -> <Self::LinkOps as LinkOps>::LinkPtr {
+        let ptr = (value as *const u8).add(offset_of!(VMM, link));
+        core::ptr::NonNull::new_unchecked(ptr as *mut _)
+    }
+
+    #[inline]
+    fn link_ops(&self) -> &Self::LinkOps {
+        &self.link_ops
+    }
+
+    #[inline]
+    fn link_ops_mut(&mut self) -> &mut Self::LinkOps {
+        &mut self.link_ops
+    }
+
+    #[inline]
+    fn pointer_ops(&self) -> &Self::PointerOps {
+        &self.pointer_ops
+    }
+}
+
+impl<'a> KeyAdapter<'a> for VMMAdapter {
+    type Key = usize;
+    fn get_key(&self, node: &'a VMM) -> Self::Key {
+        node.range.start
+    }
+}
diff --git a/kernel/src/mm/vm/range.rs b/kernel/src/mm/vm/range.rs
index f4ae48425..7387e94e9 100644
--- a/kernel/src/mm/vm/range.rs
+++ b/kernel/src/mm/vm/range.rs
@@ -9,6 +9,7 @@ use crate::cpu::flush_tlb_global_sync;
 use crate::error::SvsmError;
 use crate::locking::RWLock;
 use crate::mm::pagetable::{PTEntryFlags, PageTable, PageTablePart, PageTableRef};
+use crate::mm::GlobalBox;
 use crate::types::{PageSize, PAGE_SHIFT, PAGE_SIZE};
 use crate::utils::{align_down, align_up};

@@ -20,7 +21,6 @@ use intrusive_collections::Bound;
 use super::{Mapping, VMMAdapter, VMM};

 extern crate alloc;
-use alloc::boxed::Box;
 use alloc::sync::Arc;
 use alloc::vec::Vec;

@@ -217,7 +217,7 @@ impl VMR {
         start_pfn: usize,
         cursor: &mut CursorMut<'_, VMMAdapter>,
     ) -> Result<(), SvsmError> {
-        let vmm = Box::new(VMM::new(start_pfn, mapping));
+        let vmm = GlobalBox::try_new(VMM::new(start_pfn, mapping))?;
         if let Err(e) = self.map_vmm(&vmm) {
             self.unmap_vmm(&vmm);
             Err(e)
@@ -355,7 +355,7 @@ impl VMR {
     /// # Returns
     ///
     /// The removed mapping on success, SvsmError::Mem on error
-    pub fn remove(&self, base: VirtAddr) -> Result<Box<VMM>, SvsmError> {
+    pub fn remove(&self, base: VirtAddr) -> Result<GlobalBox<VMM>, SvsmError> {
         let mut tree = self.tree.lock_write();
         let addr = base.pfn();

From 8eec6ec29f617002ce861477bf3b49a03e1e800f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Thu, 8 Feb 2024 13:37:14 +0100
Subject: [PATCH 8/9] mm/vm/mapping: replace Box with GlobalBox
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace uses of Box in the mapping API with the new
fallible-allocation-aware GlobalBox type.
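
With this change, Mapping::new() and the various new_mapping() helpers
become fallible and return a Result, so call sites bubble errors up
with `?`. An illustrative (hypothetical) call site:

    let mapping = Arc::new(Mapping::new(VMReserved::new(size))?);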
Signed-off-by: Carlos López
---
 kernel/src/cpu/percpu.rs                 | 12 ++++++------
 kernel/src/mm/vm/mapping/api.rs          | 21 +++++++++++----------
 kernel/src/mm/vm/mapping/file_mapping.rs |  2 +-
 kernel/src/mm/vm/mapping/kernel_stack.rs |  2 +-
 kernel/src/mm/vm/mapping/phys_mem.rs     |  3 ++-
 kernel/src/mm/vm/mapping/reserved.rs     |  3 ++-
 kernel/src/mm/vm/mapping/vmalloc.rs      |  2 +-
 kernel/src/task/tasks.rs                 |  2 +-
 8 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/kernel/src/cpu/percpu.rs b/kernel/src/cpu/percpu.rs
index 83aaee865..1e9d7657e 100644
--- a/kernel/src/cpu/percpu.rs
+++ b/kernel/src/cpu/percpu.rs
@@ -347,7 +347,7 @@ impl PerCpu {
     fn allocate_stack(&mut self, base: VirtAddr) -> Result<VirtAddr, SvsmError> {
         let stack = VMKernelStack::new()?;
         let top_of_stack = stack.top_of_stack(base);
-        let mapping = Arc::new(Mapping::new(stack));
+        let mapping = Arc::new(Mapping::new(stack)?);

         self.vm_range.insert_at(base, mapping)?;

@@ -406,7 +406,7 @@ impl PerCpu {
         let vaddr = VirtAddr::from(self as *const PerCpu);
         let paddr = virt_to_phys(vaddr);

-        let self_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        let self_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
         self.vm_range.insert_at(SVSM_PERCPU_BASE, self_mapping)?;

         Ok(())
@@ -414,12 +414,12 @@ impl PerCpu {
     fn initialize_vm_ranges(&mut self) -> Result<(), SvsmError> {
         let size_4k = SVSM_PERCPU_TEMP_END_4K - SVSM_PERCPU_TEMP_BASE_4K;
-        let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k));
+        let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k)?);
         self.vm_range
             .insert_at(SVSM_PERCPU_TEMP_BASE_4K, temp_mapping_4k)?;

         let size_2m = SVSM_PERCPU_TEMP_END_2M - SVSM_PERCPU_TEMP_BASE_2M;
-        let temp_mapping_2m = Arc::new(VMReserved::new_mapping(size_2m));
+        let temp_mapping_2m = Arc::new(VMReserved::new_mapping(size_2m)?);
         self.vm_range
             .insert_at(SVSM_PERCPU_TEMP_BASE_2M, temp_mapping_2m)?;

@@ -534,7 +534,7 @@ impl PerCpu {
     pub fn map_guest_vmsa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
         assert!(self.apic_id == this_cpu().get_apic_id());

-        let vmsa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        let vmsa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
         self.vm_range
             .insert_at(SVSM_PERCPU_VMSA_BASE, vmsa_mapping)?;

@@ -565,7 +565,7 @@ impl PerCpu {
     pub fn map_guest_caa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
         self.unmap_caa();

-        let caa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
+        let caa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
         self.vm_range.insert_at(SVSM_PERCPU_CAA_BASE, caa_mapping)?;

         Ok(())
diff --git a/kernel/src/mm/vm/mapping/api.rs b/kernel/src/mm/vm/mapping/api.rs
index ad6ed2cdc..52a72d002 100644
--- a/kernel/src/mm/vm/mapping/api.rs
+++ b/kernel/src/mm/vm/mapping/api.rs
@@ -6,6 +6,7 @@

 use crate::address::{PhysAddr, VirtAddr};
 use crate::error::SvsmError;
+use crate::globalbox_upcast;
 use crate::locking::{RWLock, ReadLockGuard, WriteLockGuard};
 use crate::mm::pagetable::PTEntryFlags;
 use crate::mm::vm::VMR;
@@ -22,7 +23,6 @@ use intrusive_collections::{

 use core::ops::Range;

 extern crate alloc;
-use alloc::boxed::Box;
 use alloc::sync::Arc;

 /// Information required to resolve a page fault within a virtual mapping
@@ -143,27 +143,28 @@ pub trait VirtualMapping: fmt::Debug {

 #[derive(Debug)]
 pub struct Mapping {
-    mapping: RWLock<Box<dyn VirtualMapping>>,
+    mapping: RWLock<GlobalBox<dyn VirtualMapping>>,
 }

 unsafe impl Send for Mapping {}
 unsafe impl Sync for Mapping {}

 impl Mapping {
-    pub fn new<T>(mapping: T) -> Self
+    pub fn new<T>(mapping: T) -> Result<Self, SvsmError>
     where
         T: VirtualMapping + 'static,
     {
-        Mapping {
-            mapping: RWLock::new(Box::new(mapping)),
-        }
+        let boxed = globalbox_upcast!(GlobalBox::try_new(mapping)?, VirtualMapping);
+        Ok(Self {
+            mapping: RWLock::new(boxed),
+        })
     }

-    pub fn get(&self) -> ReadLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get(&self) -> ReadLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.lock_read()
     }

-    pub fn get_mut(&self) -> WriteLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get_mut(&self) -> WriteLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.lock_write()
     }
 }
@@ -231,11 +232,11 @@ impl VMM {
         )
     }

-    pub fn get_mapping(&self) -> ReadLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get_mapping(&self) -> ReadLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.get()
     }

-    pub fn get_mapping_mut(&self) -> WriteLockGuard<'_, Box<dyn VirtualMapping>> {
+    pub fn get_mapping_mut(&self) -> WriteLockGuard<'_, GlobalBox<dyn VirtualMapping>> {
         self.mapping.get_mut()
     }

diff --git a/kernel/src/mm/vm/mapping/file_mapping.rs b/kernel/src/mm/vm/mapping/file_mapping.rs
index 411a2afa6..11ed82017 100644
--- a/kernel/src/mm/vm/mapping/file_mapping.rs
+++ b/kernel/src/mm/vm/mapping/file_mapping.rs
@@ -153,7 +153,7 @@ fn copy_page(
 ) -> Result<(), SvsmError> {
     let page_size = usize::from(page_size);
     let temp_map = VMPhysMem::new(paddr_dst, page_size, true);
-    let vaddr_new_page = vmr.insert(Arc::new(Mapping::new(temp_map)))?;
+    let vaddr_new_page = vmr.insert(Arc::new(Mapping::new(temp_map)?))?;
     let slice = unsafe { from_raw_parts_mut(vaddr_new_page.as_mut_ptr::<u8>(), page_size) };
     file.seek(offset);
     file.read(slice)?;
diff --git a/kernel/src/mm/vm/mapping/kernel_stack.rs b/kernel/src/mm/vm/mapping/kernel_stack.rs
index bf7da012c..dfcad17a4 100644
--- a/kernel/src/mm/vm/mapping/kernel_stack.rs
+++ b/kernel/src/mm/vm/mapping/kernel_stack.rs
@@ -101,7 +101,7 @@ impl VMKernelStack {
     ///
     /// Initialized Mapping to stack on success, Err(SvsmError::Mem) on error
     pub fn new_mapping() -> Result<Mapping, SvsmError> {
-        Ok(Mapping::new(Self::new()?))
+        Mapping::new(Self::new()?)
     }

     fn alloc_pages(&mut self) -> Result<(), SvsmError> {
diff --git a/kernel/src/mm/vm/mapping/phys_mem.rs b/kernel/src/mm/vm/mapping/phys_mem.rs
index a86017413..6680bd3e1 100644
--- a/kernel/src/mm/vm/mapping/phys_mem.rs
+++ b/kernel/src/mm/vm/mapping/phys_mem.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel

 use crate::address::{Address, PhysAddr};
+use crate::error::SvsmError;
 use crate::mm::pagetable::PTEntryFlags;

 use super::{Mapping, VirtualMapping};
@@ -51,7 +52,7 @@ impl VMPhysMem {
     /// # Returns
     ///
     /// New [`Mapping`] containing [`VMPhysMem`]
-    pub fn new_mapping(base: PhysAddr, size: usize, writable: bool) -> Mapping {
+    pub fn new_mapping(base: PhysAddr, size: usize, writable: bool) -> Result<Mapping, SvsmError> {
         Mapping::new(Self::new(base, size, writable))
     }
 }
diff --git a/kernel/src/mm/vm/mapping/reserved.rs b/kernel/src/mm/vm/mapping/reserved.rs
index 5bd9b298b..62441f349 100644
--- a/kernel/src/mm/vm/mapping/reserved.rs
+++ b/kernel/src/mm/vm/mapping/reserved.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel

 use crate::address::PhysAddr;
+use crate::error::SvsmError;
 use crate::mm::pagetable::PTEntryFlags;

 use super::{Mapping, VirtualMapping};
@@ -41,7 +42,7 @@ impl VMReserved {
     /// # Returns
     ///
     /// New Mapping of VMReserved
-    pub fn new_mapping(size: usize) -> Mapping {
+    pub fn new_mapping(size: usize) -> Result<Mapping, SvsmError> {
         Mapping::new(Self::new(size))
     }
 }
diff --git a/kernel/src/mm/vm/mapping/vmalloc.rs b/kernel/src/mm/vm/mapping/vmalloc.rs
index 41a3090fb..a5ea5b8c8 100644
--- a/kernel/src/mm/vm/mapping/vmalloc.rs
+++ b/kernel/src/mm/vm/mapping/vmalloc.rs
@@ -49,7 +49,7 @@ impl VMalloc {
     ///
     /// New [`Mapping`] on success, Err(SvsmError::Mem) on error
     pub fn new_mapping(size: usize) -> Result<Mapping, SvsmError> {
-        Ok(Mapping::new(Self::new(size)?))
+        Mapping::new(Self::new(size)?)
     }

     fn alloc_pages(&mut self) -> Result<(), SvsmError> {
diff --git a/kernel/src/task/tasks.rs b/kernel/src/task/tasks.rs
index adbd51c97..3e78917e2 100644
--- a/kernel/src/task/tasks.rs
+++ b/kernel/src/task/tasks.rs
@@ -266,7 +266,7 @@ impl Task {
         let stack = VMKernelStack::new()?;
         let bounds = stack.bounds(VirtAddr::from(0u64));

-        let mapping = Arc::new(Mapping::new(stack));
+        let mapping = Arc::new(Mapping::new(stack)?);
         let percpu_mapping = cpu.new_mapping(mapping.clone())?;

         // We need to setup a context on the stack that matches the stack layout

From 6a05132f6468b78dd2cd503dab8869b65680e1aa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20L=C3=B3pez?=
Date: Wed, 31 Jan 2024 14:55:01 +0100
Subject: [PATCH 9/9] sev/secrets_page: replace Box with GlobalBox
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use the new fallible-allocation-aware GlobalBox type when allocating
a new secrets page for the given VMPL.
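
copy_for_vmpl() now returns Result<GlobalBox<SecretsPage>, SvsmError>,
so callers such as copy_secrets_page_to_fw() forward allocation
failures with `?` instead of aborting.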
Signed-off-by: Carlos López
---
 kernel/src/sev/secrets_page.rs | 11 +++++------
 kernel/src/svsm.rs             |  2 +-
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/kernel/src/sev/secrets_page.rs b/kernel/src/sev/secrets_page.rs
index 7ad773ff1..afe24762f 100644
--- a/kernel/src/sev/secrets_page.rs
+++ b/kernel/src/sev/secrets_page.rs
@@ -5,13 +5,12 @@
 // Author: Joerg Roedel

 use crate::address::VirtAddr;
+use crate::error::SvsmError;
 use crate::locking::{RWLock, ReadLockGuard, WriteLockGuard};
+use crate::mm::GlobalBox;
 use crate::sev::vmsa::VMPL_MAX;
 use crate::types::GUEST_VMPL;

-extern crate alloc;
-use alloc::boxed::Box;
-
 pub const VMPCK_SIZE: usize = 32;

 #[derive(Copy, Clone, Debug)]
@@ -73,13 +72,13 @@ impl SecretsPage {
         }
     }

-    pub fn copy_for_vmpl(&self, vmpl: usize) -> Box<SecretsPage> {
-        let mut sp = Box::new(*self);
+    pub fn copy_for_vmpl(&self, vmpl: usize) -> Result<GlobalBox<SecretsPage>, SvsmError> {
+        let mut sp = GlobalBox::try_new(*self)?;
         for idx in 0..vmpl {
             sp.clear_vmpck(idx);
         }

-        sp
+        Ok(sp)
     }

     pub fn set_svsm_data(&mut self, base: u64, size: u64, caa_addr: u64) {
diff --git a/kernel/src/svsm.rs b/kernel/src/svsm.rs
index 08ce3afbf..3dc470021 100755
--- a/kernel/src/svsm.rs
+++ b/kernel/src/svsm.rs
@@ -125,7 +125,7 @@ fn copy_secrets_page_to_fw(fw_addr: PhysAddr, caa_addr: PhysAddr) -> Result<(), SvsmError> {
     zero_mem_region(start, start + PAGE_SIZE);

     // Copy secrets page
-    let mut fw_secrets_page = secrets_page().copy_for_vmpl(GUEST_VMPL);
+    let mut fw_secrets_page = secrets_page().copy_for_vmpl(GUEST_VMPL)?;

     let &li = &*LAUNCH_INFO;