Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

alloc: initial TryBox implementation #196

Draft
wants to merge 9 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
918 changes: 918 additions & 0 deletions kernel/src/alloc/boxed.rs

Large diffs are not rendered by default.

409 changes: 409 additions & 0 deletions kernel/src/alloc/mod.rs

Large diffs are not rendered by default.

195 changes: 195 additions & 0 deletions kernel/src/alloc/unique.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,195 @@
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (C) 2023 SUSE
//
// Author: Carlos López <[email protected]>

use core::convert::From;
use core::fmt;
use core::marker::PhantomData;
use core::ptr::NonNull;

/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. Useful for building abstractions like
/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
///
/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
/// the kind of strong aliasing guarantees an instance of `T` can expect:
/// the referent of the pointer should not be modified without a unique path to
/// its owning Unique.
///
/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
/// consider using `NonNull`, which has weaker semantics.
///
/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
/// is never dereferenced. This is so that enums may use this forbidden value
/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
/// However the pointer may still dangle if it isn't dereferenced.
///
/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
/// for any type which upholds Unique's aliasing requirements.
#[repr(transparent)]
// In the standard library this type is a lang item used experimentally by
// Miri to define the semantics of `Unique`; this vendored copy carries no
// `#[lang]` attribute and therefore no special compiler semantics.
pub struct Unique<T: ?Sized> {
    // Invariant: always non-null. Using `NonNull` also provides the niche
    // that makes `Option<Unique<T>>` the same size as `Unique<T>`.
    pointer: NonNull<T>,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}

/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
// SAFETY: sound only under the ownership invariant documented on `Unique`:
// the possessor is the sole owner of the referent, so sending the pointer
// is equivalent to sending the `T` itself.
unsafe impl<T: Send + ?Sized> Send for Unique<T> {}

/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
// SAFETY: same reasoning as the `Send` impl above — sharing `&Unique<T>`
// only exposes the referent under `T`'s own `Sync` guarantees.
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> {}

impl<T: Sized> Unique<T> {
/// Creates a new `Unique` that is dangling, but well-aligned.
///
/// This is useful for initializing types which lazily allocate, like
/// `Vec::new` does.
///
/// Note that the pointer value may potentially represent a valid pointer to
/// a `T`, which means this must not be used as a "not yet initialized"
/// sentinel value. Types that lazily allocate must track initialization by
/// some other means.
#[must_use]
#[inline]
pub const fn dangling() -> Self {
// FIXME(const-hack) replace with `From`
Unique {
pointer: NonNull::dangling(),
_marker: PhantomData,
}
}
}

impl<T: ?Sized> Unique<T> {
    /// Creates a new `Unique`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[inline]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        // SAFETY: the caller must guarantee that `ptr` is non-null.
        unsafe {
            Unique {
                pointer: NonNull::new_unchecked(ptr),
                _marker: PhantomData,
            }
        }
    }

    /// Creates a new `Unique` if `ptr` is non-null, returning `None`
    /// otherwise.
    #[inline]
    pub fn new(ptr: *mut T) -> Option<Self> {
        NonNull::new(ptr).map(|pointer| Unique {
            pointer,
            _marker: PhantomData,
        })
    }

    /// Acquires the underlying `*mut` pointer.
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub const fn as_ptr(self) -> *mut T {
        self.pointer.as_ptr()
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    ///
    /// # Safety
    ///
    /// The pointer must be properly aligned and point to an initialized
    /// instance of `T` that remains valid, unaliased by mutable references,
    /// for the duration of the returned borrow.
    #[must_use]
    #[inline]
    pub const unsafe fn as_ref(&self) -> &T {
        // SAFETY: the caller must guarantee that `self` meets all the
        // requirements for a reference.
        unsafe { self.pointer.as_ref() }
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    ///
    /// # Safety
    ///
    /// The pointer must be properly aligned and point to an initialized
    /// instance of `T` that remains valid and completely unaliased for the
    /// duration of the returned borrow.
    #[inline]
    pub unsafe fn as_mut(&mut self) -> &mut T {
        // SAFETY: the caller must guarantee that `self` meets all the
        // requirements for a mutable reference.
        unsafe { self.pointer.as_mut() }
    }

    /// Casts to a pointer of another type.
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub const fn cast<U>(self) -> Unique<U> {
        // No `unsafe` is required here: `self.pointer` is already non-null
        // and `NonNull::cast` preserves that invariant, so the struct can be
        // built directly (also sidesteps the const-incompatible `From`).
        Unique {
            pointer: self.pointer.cast(),
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized> Clone for Unique<T> {
#[inline]
fn clone(&self) -> Self {
*self
}
}

// `Unique` is only a pointer plus a zero-sized marker, so a bitwise copy is
// valid. Copying does not enforce uniqueness of the referent; the abstraction
// built on top of `Unique` must maintain that invariant.
impl<T: ?Sized> Copy for Unique<T> {}

impl<T: ?Sized> fmt::Debug for Unique<T> {
    /// Debug-formats the pointer value (identical to `fmt::Pointer` output).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the `fmt::Pointer` impl, which formats `as_ptr()`.
        <Self as fmt::Pointer>::fmt(self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Unique<T> {
    /// Formats the raw pointer value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `NonNull`'s `Pointer` impl formats its `as_ptr()` value, so this
        // produces exactly the same output as formatting `self.as_ptr()`.
        fmt::Pointer::fmt(&self.pointer, f)
    }
}

impl<T: ?Sized> From<&mut T> for Unique<T> {
/// Converts a `&mut T` to a `Unique<T>`.
///
/// This conversion is infallible since references cannot be null.
#[inline]
fn from(reference: &mut T) -> Self {
Self::from(NonNull::from(reference))
}
}

impl<T: ?Sized> From<NonNull<T>> for Unique<T> {
/// Converts a `NonNull<T>` to a `Unique<T>`.
///
/// This conversion is infallible since `NonNull` cannot be null.
#[inline]
fn from(pointer: NonNull<T>) -> Self {
Unique {
pointer,
_marker: PhantomData,
}
}
}

impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
#[inline]
fn from(unique: Unique<T>) -> Self {
unique.pointer
}
}
12 changes: 6 additions & 6 deletions kernel/src/cpu/percpu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ impl PerCpu {
fn allocate_stack(&mut self, base: VirtAddr) -> Result<VirtAddr, SvsmError> {
let stack = VMKernelStack::new()?;
let top_of_stack = stack.top_of_stack(base);
let mapping = Arc::new(Mapping::new(stack));
let mapping = Arc::new(Mapping::new(stack)?);

self.vm_range.insert_at(base, mapping)?;

Expand Down Expand Up @@ -406,20 +406,20 @@ impl PerCpu {
let vaddr = VirtAddr::from(self as *const PerCpu);
let paddr = virt_to_phys(vaddr);

let self_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
let self_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
self.vm_range.insert_at(SVSM_PERCPU_BASE, self_mapping)?;

Ok(())
}

fn initialize_vm_ranges(&mut self) -> Result<(), SvsmError> {
let size_4k = SVSM_PERCPU_TEMP_END_4K - SVSM_PERCPU_TEMP_BASE_4K;
let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k));
let temp_mapping_4k = Arc::new(VMReserved::new_mapping(size_4k)?);
self.vm_range
.insert_at(SVSM_PERCPU_TEMP_BASE_4K, temp_mapping_4k)?;

let size_2m = SVSM_PERCPU_TEMP_END_2M - SVSM_PERCPU_TEMP_BASE_2M;
let temp_mapping_2m = Arc::new(VMReserved::new_mapping(size_2m));
let temp_mapping_2m = Arc::new(VMReserved::new_mapping(size_2m)?);
self.vm_range
.insert_at(SVSM_PERCPU_TEMP_BASE_2M, temp_mapping_2m)?;

Expand Down Expand Up @@ -534,7 +534,7 @@ impl PerCpu {

pub fn map_guest_vmsa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
assert!(self.apic_id == this_cpu().get_apic_id());
let vmsa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
let vmsa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
self.vm_range
.insert_at(SVSM_PERCPU_VMSA_BASE, vmsa_mapping)?;

Expand Down Expand Up @@ -565,7 +565,7 @@ impl PerCpu {
pub fn map_guest_caa(&self, paddr: PhysAddr) -> Result<(), SvsmError> {
self.unmap_caa();

let caa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true));
let caa_mapping = Arc::new(VMPhysMem::new_mapping(paddr, PAGE_SIZE, true)?);
self.vm_range.insert_at(SVSM_PERCPU_CAA_BASE, caa_mapping)?;

Ok(())
Expand Down
5 changes: 4 additions & 1 deletion kernel/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
//
// Author: Carlos López <[email protected]>

use crate::alloc::TryAllocError;
use crate::cpu::vc::VcError;
use crate::fs::FsError;
use crate::fw_cfg::FwCfgError;
Expand All @@ -18,7 +19,7 @@ use crate::task::TaskError;
// containing a leaf error type, usually the one corresponding to
// that module. We always provide a way to convert a leaf error into
// a SvsmError via the From trait at the module level.
#[derive(Clone, Copy, Debug)]
#[derive(Clone, Debug)]
pub enum SvsmError {
// Errors related to GHCB
Ghcb(GhcbError),
Expand Down Expand Up @@ -46,6 +47,8 @@ pub enum SvsmError {
FileSystem(FsError),
// Task management errors,
Task(TaskError),
// Smart pointer errors
TryAlloc(TryAllocError),
// Errors from #VC handler
Vc(VcError),
}
21 changes: 10 additions & 11 deletions kernel/src/greq/driver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,18 +8,17 @@
//! request or response command types defined in the SEV-SNP spec, regardless if it's
//! a regular or an extended command.

extern crate alloc;

use alloc::boxed::Box;
use core::ptr::addr_of_mut;
use core::{cell::OnceCell, mem::size_of};

use crate::{
address::VirtAddr,
alloc::boxed::TryBox,
cpu::ghcb::current_ghcb,
error::SvsmError,
greq::msg::{SnpGuestRequestExtData, SnpGuestRequestMsg, SnpGuestRequestMsgType},
locking::SpinLock,
mm::GlobalBox,
protocols::errors::{SvsmReqError, SvsmResultCode},
sev::{ghcb::GhcbError, secrets_page, secrets_page_mut, VMPCK_SIZE},
types::PAGE_SHIFT,
Expand Down Expand Up @@ -48,14 +47,14 @@ enum SnpGuestRequestClass {
#[derive(Debug)]
struct SnpGuestRequestDriver {
/// Shared page used for the `SNP_GUEST_REQUEST` request
request: Box<SnpGuestRequestMsg>,
request: GlobalBox<SnpGuestRequestMsg>,
/// Shared page used for the `SNP_GUEST_REQUEST` response
response: Box<SnpGuestRequestMsg>,
response: GlobalBox<SnpGuestRequestMsg>,
/// Encrypted page where we perform crypto operations
staging: Box<SnpGuestRequestMsg>,
staging: GlobalBox<SnpGuestRequestMsg>,
/// Extended data buffer that will be provided to the hypervisor
/// to store the SEV-SNP certificates
ext_data: Box<SnpGuestRequestExtData>,
ext_data: GlobalBox<SnpGuestRequestExtData>,
/// Extended data size (`certs` size) provided by the user in [`super::services::get_extended_report`].
/// It will be provided to the hypervisor.
user_extdata_size: usize,
Expand Down Expand Up @@ -84,21 +83,21 @@ impl Drop for SnpGuestRequestDriver {
SnpGuestRequestMsg::boxed_new().expect("GREQ: failed to allocate request");
let old_req = core::mem::replace(&mut self.request, new_req);
log::error!("GREQ: request: failed to set page to encrypted. Memory leak!");
Box::leak(old_req);
TryBox::leak(old_req.into());
}
if self.response.set_encrypted().is_err() {
let new_resp =
SnpGuestRequestMsg::boxed_new().expect("GREQ: failed to allocate response");
let old_resp = core::mem::replace(&mut self.response, new_resp);
log::error!("GREQ: response: failed to set page to encrypted. Memory leak!");
Box::leak(old_resp);
TryBox::leak(old_resp.into());
}
if self.ext_data.set_encrypted().is_err() {
let new_data =
SnpGuestRequestExtData::boxed_new().expect("GREQ: failed to allocate ext_data");
let old_data = core::mem::replace(&mut self.ext_data, new_data);
log::error!("GREQ: ext_data: failed to set pages to encrypted. Memory leak!");
Box::leak(old_data);
TryBox::leak(old_data.into());
}
}
}
Expand Down Expand Up @@ -211,7 +210,7 @@ impl SnpGuestRequestDriver {
.staging
.decrypt_get(msg_type, msg_seqno, &vmpck0, buffer);

if let Err(e) = result {
if let Err(ref e) = result {
match e {
// The buffer provided is too small to store the unwrapped response.
// There is no need to clear the VMPCK0, just report it as invalid parameter.
Expand Down
Loading
Loading