From c7f8c32bf318897178795cb09ddf45a065535c3e Mon Sep 17 00:00:00 2001
From: memN0ps <89628341+memN0ps@users.noreply.github.com>
Date: Tue, 16 Jul 2024 02:01:39 +1200
Subject: [PATCH] Revert "Add custom global heap allocator and documentation for clarity."

This reverts commit d9650448ac4602d5a3e0f9a7e98fc99f775cc6e4.
---
 hypervisor/src/allocator.rs                  | 333 +++++++++++++++++++
 hypervisor/src/global_const.rs               |   4 -
 hypervisor/src/heap.rs                       |  64 ----
 hypervisor/src/intel/hooks/hook_manager.rs   |  12 +-
 hypervisor/src/intel/hooks/memory_manager.rs |   2 +-
 hypervisor/src/lib.rs                        |   2 +-
 uefi/Cargo.toml                              |   1 -
 uefi/src/main.rs                             |  21 +-
 uefi/src/setup.rs                            |   2 +-
 9 files changed, 348 insertions(+), 93 deletions(-)
 create mode 100644 hypervisor/src/allocator.rs
 delete mode 100644 hypervisor/src/heap.rs

diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs
new file mode 100644
index 0000000..db0e028
--- /dev/null
+++ b/hypervisor/src/allocator.rs
@@ -0,0 +1,333 @@
+//! This module provides a global allocator using a linked list heap allocation strategy.
+//! The allocator is initialized with a fixed-size memory pool and supports memory allocation,
+//! deallocation, and reallocation operations. The allocator tracks memory usage and provides
+//! debugging information.
+
+use {
+    crate::global_const::TOTAL_HEAP_SIZE,
+    alloc::boxed::Box,
+    core::{
+        alloc::{GlobalAlloc, Layout},
+        ptr,
+    },
+    log::debug,
+    spin::Mutex,
+};
+
+/// Global allocator instance with a heap size of `TOTAL_HEAP_SIZE`.
+#[global_allocator]
+pub static mut HEAP: ListHeap<TOTAL_HEAP_SIZE> = ListHeap::new();
+
+/// Initializes the linked list heap.
+pub unsafe fn heap_init() {
+    HEAP.reset();
+}
+
+/// A heap allocator based on a linked list of free chunks.
+///
+/// This struct manages a heap of a fixed size using a linked list
+/// of free chunks. It supports memory allocation, deallocation, and
+/// reallocation.
+#[repr(align(0x10))]
+pub struct ListHeap<const SIZE: usize>(core::mem::MaybeUninit<[u8; SIZE]>);
+
+/// Static mutex to ensure thread safety during allocation and deallocation.
+static ALLOCATOR_MUTEX: Mutex<()> = Mutex::new(());
+
+impl<const SIZE: usize> ListHeap<SIZE> {
+    /// Creates a new, uninitialized ListHeap.
+    ///
+    /// # Returns
+    ///
+    /// A new instance of `ListHeap`.
+    pub const fn new() -> Self {
+        Self(core::mem::MaybeUninit::uninit())
+    }
+
+    /// Returns the heap as a slice.
+    ///
+    /// # Returns
+    ///
+    /// A slice representing the heap.
+    pub fn as_slice(&self) -> &[u8] {
+        unsafe { &self.0.assume_init_ref()[..] }
+    }
+
+    /// Resets the heap to its default state. This must be called at the start.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because it must be called exactly once before any allocations are made.
+    pub unsafe fn reset(&mut self) {
+        let start = self.first_link();
+        let last = self.last_link();
+        (&mut *start).size = 0;
+        (&mut *start).next = last;
+        (&mut *last).size = 0;
+        (&mut *last).next = last;
+    }
+
+    /// Returns the first link in the heap.
+    ///
+    /// # Returns
+    ///
+    /// A pointer to the first link in the heap.
+    fn first_link(&self) -> *mut Link {
+        self.0.as_ptr() as *mut _
+    }
+
+    /// Returns the last link in the heap.
+    ///
+    /// # Returns
+    ///
+    /// A pointer to the last link in the heap.
+    fn last_link(&self) -> *mut Link {
+        unsafe { (self.0.as_ptr() as *const u8).add(SIZE).sub(Link::SIZE) as *mut _ }
+    }
+
+    /// Debugging function to print the current state of the heap.
+    pub fn _debug(&self) {
+        unsafe {
+            let mut total_freespace = 0usize;
+            let mut total_allocations = 0usize;
+            let mut total_allocation_size = 0usize;
+
+            let mut max_freespace = 0usize;
+            let mut largest_allocation = 0usize;
+
+            let mut link = self.first_link();
+            while (*link).next != link {
+                let free = (&*link).free_space() as usize;
+                let used = (&*link).size as usize;
+
+                total_allocations += 1;
+                total_allocation_size += used;
+                total_freespace += free;
+                max_freespace = max_freespace.max(free);
+                largest_allocation = largest_allocation.max(used);
+
+                link = (*link).next;
+            }
+
+            // Skip the first link
+            total_allocations -= 1;
+
+            let wasted = (total_allocations + 2) * Link::SIZE;
+            debug!("Total Heap Size: 0x{:X}", SIZE);
+            debug!("Space wasted on memory management: 0x{wasted:X} bytes");
+            debug!("Total memory allocated: 0x{total_allocation_size:X} bytes");
+            debug!("Total memory available: 0x{total_freespace:X} bytes");
+            debug!("Largest allocated buffer: 0x{largest_allocation:X} bytes");
+            debug!("Largest available buffer: 0x{max_freespace:X} bytes");
+            debug!("Total allocation count: 0x{total_allocations:X}");
+        }
+    }
+}
+
+/// A structure representing a link in a linked list heap.
+///
+/// This struct is used to manage free and allocated memory chunks in the heap.
+/// Each link points to the next chunk and tracks the size of the current chunk.
+#[repr(C, align(0x10))]
+struct Link {
+    /// Pointer to the next link in the list.
+    next: *mut Link,
+    /// Size of the current chunk.
+    size: isize,
+}
+
+impl Link {
+    const SIZE: usize = size_of::<Self>();
+    const ALIGN: usize = align_of::<Self>();
+
+    /// Gets the start of the buffer.
+    ///
+    /// # Returns
+    ///
+    /// The start position of the buffer.
+    pub fn position(&self) -> usize {
+        self as *const _ as usize + Link::SIZE
+    }
+
+    /// Checks if the link is the last in the list.
+    ///
+    /// # Returns
+    ///
+    /// `true` if the link is the last, `false` otherwise.
+    pub fn is_last(&self) -> bool {
+        self.next as *const _ == self
+    }
+
+    /// Returns the maximum size available for allocation.
+    ///
+    /// # Returns
+    ///
+    /// The maximum size available for allocation.
+    pub fn max_size(&self) -> isize {
+        (self.next as usize - self.position()) as isize
+    }
+
+    /// Returns the free space available for allocation.
+    ///
+    /// # Returns
+    ///
+    /// The free space available for allocation.
+    pub fn free_space(&self) -> isize {
+        self.max_size() - self.size
+    }
+
+    /// Returns the start position of the free space.
+    ///
+    /// # Returns
+    ///
+    /// The start position of the free space.
+    pub fn free_space_start(&self) -> usize {
+        self.position() + self.size as usize
+    }
+}
+
+unsafe impl<const SIZE: usize> GlobalAlloc for ListHeap<SIZE> {
+    /// Allocates memory from the linked list heap.
+    ///
+    /// # Arguments
+    ///
+    /// * `layout` - The layout of the memory to be allocated.
+    ///
+    /// # Returns
+    ///
+    /// A pointer to the allocated memory.
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety
+
+        let mut link = self.first_link();
+
+        // The required alignment and size for this type
+        // We don't support alignments less than 0x10 because of the Link
+        let required_align = layout.align().max(Link::ALIGN);
+        let required_size = layout.size() as isize;
+
+        while !(&*link).is_last() {
+            if ((*link).next as usize) < (&*link).position() {
+                debug!("Last: {:p}", self.last_link());
+                debug!("link: {:p}", link);
+                debug!("next: {:p}", (*link).next);
+                debug!("size: 0x{:x}", (*link).size);
+            }
+
+            if (&*link).free_space() > required_size {
+                // The effective size and start address after we account for our link
+                let effective_start = (&*link).free_space_start() + Link::SIZE;
+                let effective_size = (&*link).free_space() - Link::SIZE as isize;
+
+                // Align the pointer, and adjust the size to account for the bytes we lost
+                let mask = required_align - 1;
+                let aligned_pointer = (effective_start + mask) & !mask;
+                let aligned_size = effective_size - (aligned_pointer - effective_start) as isize;
+
+                // If the required size is less than the effective size after alignment, use it
+                if required_size < aligned_size {
+                    let new_link = (aligned_pointer - Link::SIZE) as *mut Link;
+                    (&mut *new_link).next = (&mut *link).next;
+                    (&mut *new_link).size = required_size;
+                    (&mut *link).next = new_link;
+
+                    return aligned_pointer as *mut _;
+                }
+            }
+
+            // Not enough room, keep looking
+            link = (&mut *link).next;
+        }
+
+        self._debug();
+        // No free memory for this allocation
+        0 as *mut _
+    }
+
+    /// Deallocates memory within the linked list heap.
+    ///
+    /// # Arguments
+    ///
+    /// * `ptr` - A pointer to the memory to be deallocated.
+    /// * `layout` - The layout of the memory to be deallocated.
+    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+        if ptr.is_null() {
+            return;
+        }
+        let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety
+
+        let link = &mut *(ptr.sub(size_of::<Link>()) as *mut Link);
+
+        // Sanity check, don't deallocate the last link
+        if link.is_last() {
+            return;
+        }
+
+        // Find the previous link
+        let mut prev = self.first_link();
+        while (&*prev).next != link {
+            prev = (&*prev).next;
+        }
+
+        // Remove the link from the list, and it's deallocated
+        (&mut *prev).next = link.next;
+    }
+
+    /// Tries to grow the current allocation in place if it can;
+    /// if not, it reallocates and copies the buffer to the new allocation.
+    ///
+    /// # Arguments
+    ///
+    /// * `ptr` - A pointer to the memory to be reallocated.
+    /// * `layout` - The current layout of the memory.
+    /// * `new_size` - The new size of the memory to be allocated.
+    ///
+    /// # Returns
+    ///
+    /// A pointer to the reallocated memory.
+    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+        let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety
+
+        let link = &mut *(ptr.sub(size_of::<Link>()) as *mut Link);
+
+        // Just resize the buffer
+        if link.max_size() > new_size as isize {
+            link.size = new_size as isize;
+            return ptr;
+        }
+
+        // Construct the new layout and try to allocate it
+        let nlayout = Layout::from_size_align_unchecked(new_size, layout.align());
+        let new_ptr = self.alloc(nlayout);
+
+        // Failed to allocate a new buffer, don't alter original data and abort
+        if new_ptr.is_null() {
+            return new_ptr;
+        }
+
+        // Copy data to the new array
+        ptr::copy_nonoverlapping(ptr, new_ptr, layout.size());
+
+        self.dealloc(ptr, layout);
+
+        new_ptr
+    }
+}
+
+/// Allocates and zeros memory for a given type, returning a boxed instance.
+///
+/// # Safety
+///
+/// This function allocates memory and initializes it to zero. It must be called
+/// in a safe context where allocation errors and uninitialized memory access are handled.
+///
+/// # Returns
+///
+/// Returns a `Box` pointing to the zero-initialized memory of type `T`.
+///
+/// # Panics
+///
+/// Panics if memory allocation fails.
+pub unsafe fn box_zeroed<T>() -> Box<T> {
+    unsafe { Box::<T>::new_zeroed().assume_init() }
+}
diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs
index 908fc7a..3cddc48 100644
--- a/hypervisor/src/global_const.rs
+++ b/hypervisor/src/global_const.rs
@@ -1,9 +1,5 @@
 use {crate::intel::vm::Vm, core::mem::size_of};
 
-/// Maximum number of stack memory allocations that can be recorded per logical processor (128),
-/// plus one additional allocation for the image base itself.
-pub const MAX_RECORDABLE_STACK_ALLOCATIONS: usize = 128 + 1;
-
 /// Number of stack pages per logical processor.
 /// Includes size of `Vm` in pages plus 0x1000 (4096) pages for padding.
 /// - Size of `Vm`: 1027 pages (0x403 pages).
diff --git a/hypervisor/src/heap.rs b/hypervisor/src/heap.rs
deleted file mode 100644
index da1f0dc..0000000
--- a/hypervisor/src/heap.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-//! This crate provides a fixed-size heap implementation and utilities for memory allocation in Rust.
-//! It includes a zero-initialized memory allocation function for safe and efficient memory management.
-
-use {crate::global_const::TOTAL_HEAP_SIZE, alloc::boxed::Box};
-
-/// A static mutable heap of fixed size. This heap is used for memory allocation.
-///
-/// # Safety
-///
-/// This static mutable heap should be used with caution. Ensure proper synchronization
-/// if accessed from multiple threads.
-pub static mut HEAP: Heap<TOTAL_HEAP_SIZE> = Heap::new();
-
-/// A heap structure with a fixed size, aligned to 4096 bytes.
-///
-/// This structure represents a heap with a fixed size, which can be used for
-/// memory allocations within the hypervisor or other low-level system components.
-///
-/// # Generics
-///
-/// - `SIZE`: The size of the heap in bytes.
-#[repr(C, align(4096))]
-pub struct Heap<const SIZE: usize> {
-    /// The underlying byte array representing the heap memory.
-    heap: [u8; SIZE],
-}
-
-impl<const SIZE: usize> Heap<SIZE> {
-    /// Creates a new instance of the heap, initialized to zero.
-    ///
-    /// # Returns
-    ///
-    /// Returns a new `Heap` instance with the specified size.
-    pub const fn new() -> Self {
-        Self { heap: [0u8; SIZE] }
-    }
-
-    /// Returns a mutable pointer to the heap.
-    ///
-    /// # Returns
-    ///
-    /// Returns a mutable pointer to the `Heap` instance.
-    pub const fn as_mut_ptr(&mut self) -> *mut Heap<SIZE> {
-        self
-    }
-}
-
-/// Allocates and zeros memory for a given type, returning a boxed instance.
-///
-/// # Safety
-///
-/// This function allocates memory and initializes it to zero. It must be called
-/// in a safe context where allocation errors and uninitialized memory access are handled.
-///
-/// # Returns
-///
-/// Returns a `Box` pointing to the zero-initialized memory of type `T`.
-///
-/// # Panics
-///
-/// Panics if memory allocation fails.
-pub unsafe fn box_zeroed<T>() -> Box<T> {
-    unsafe { Box::<T>::new_zeroed().assume_init() }
-}
diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs
index deb11e2..b15c839 100644
--- a/hypervisor/src/intel/hooks/hook_manager.rs
+++ b/hypervisor/src/intel/hooks/hook_manager.rs
@@ -1,7 +1,6 @@
 use {
     crate::{
         error::HypervisorError,
-        global_const::MAX_RECORDABLE_STACK_ALLOCATIONS,
         intel::{
             addresses::PhysicalAddress,
             bitmap::{MsrAccessType, MsrBitmap, MsrOperation},
@@ -68,8 +67,7 @@ pub struct HookManager {
     /// KiSetCacheInformation -> KiSetCacheInformationIntel -> KiSetStandardizedCacheInformation -> __cpuid(4, 0)
     pub has_cpuid_cache_info_been_called: bool,
 
-    /// A vector for tracking stack memory allocations. Each entry contains the base address and size of the allocation.
-    pub stack_memory_allocations: Vec<(usize, usize)>,
+    pub stack_memory: Vec<(usize, usize)>,
 }
 
 lazy_static! {
@@ -91,7 +89,7 @@ lazy_static! {
         ntoskrnl_base_pa: 0,
         ntoskrnl_size: 0,
         has_cpuid_cache_info_been_called: false,
-        stack_memory_allocations: Vec::with_capacity(MAX_RECORDABLE_STACK_ALLOCATIONS),
+        stack_memory: Vec::with_capacity(128),
     });
 }
 
@@ -121,12 +119,12 @@ impl HookManager {
     /// * `start` - The start address of the memory allocation.
     /// * `size` - The size of the memory allocation.
     pub fn record_allocation(&mut self, start: usize, size: usize) {
-        self.stack_memory_allocations.push((start, size));
+        self.stack_memory.push((start, size));
     }
 
     /// Prints the allocated memory ranges for debugging purposes.
     pub fn print_allocated_memory(&self) {
-        self.stack_memory_allocations.iter().for_each(|(start, size)| {
+        self.stack_memory.iter().for_each(|(start, size)| {
             debug!("Memory Range: Start = {:#x}, Size = {:#x}", start, size);
         });
     }
 
@@ -220,7 +218,7 @@ impl HookManager {
     /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise.
     pub fn hide_hypervisor_memory(&mut self, vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> {
         let pages: Vec<u64> = self
-            .stack_memory_allocations
+            .stack_memory
             .iter()
             .step_by(BASE_PAGE_SIZE)
             .map(|(start, _size)| *start as u64)
diff --git a/hypervisor/src/intel/hooks/memory_manager.rs b/hypervisor/src/intel/hooks/memory_manager.rs
index 70abc63..8ec92cf 100644
--- a/hypervisor/src/intel/hooks/memory_manager.rs
+++ b/hypervisor/src/intel/hooks/memory_manager.rs
@@ -4,8 +4,8 @@
 
 use {
     crate::{
+        allocator::box_zeroed,
         error::HypervisorError,
-        heap::box_zeroed,
         intel::{ept::Pt, hooks::hook_manager::EptHookType, page::Page},
     },
     alloc::{boxed::Box, collections::BTreeMap, vec::Vec},
diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs
index 4ce64c7..5c5cbc9 100644
--- a/hypervisor/src/lib.rs
+++ b/hypervisor/src/lib.rs
@@ -13,9 +13,9 @@
 extern crate alloc;
 extern crate static_assertions;
 
+pub mod allocator;
 pub mod error;
 pub mod global_const;
-pub mod heap;
 pub mod intel;
 pub mod logger;
 pub mod vmm;
diff --git a/uefi/Cargo.toml b/uefi/Cargo.toml
index 6d1083c..c554f16 100644
--- a/uefi/Cargo.toml
+++ b/uefi/Cargo.toml
@@ -15,5 +15,4 @@ log = { version = "0.4.20", default-features = false } # https://crates.io/crate
 once_cell = "1.19.0" # https://crates.io/crates/once_cell
 spin = "0.9" # https://crates.io/crates/spin
 com_logger = "0.1.1" # https://crates.io/crates/com_logger
-axalloc = { git = "https://github.com/arceos-org/arceos.git", features = ["slab"] } # https://github.com/arceos-org/arceos.git
 hypervisor = { path = "../hypervisor" }
\ No newline at end of file
diff --git a/uefi/src/main.rs b/uefi/src/main.rs
index d2b6f4b..def736d 100644
--- a/uefi/src/main.rs
+++ b/uefi/src/main.rs
@@ -10,10 +10,8 @@ extern crate alloc;
 
 use {
     crate::{processor::start_hypervisor_on_all_processors, setup::setup, stack::init},
-    axalloc::GlobalAllocator,
     hypervisor::{
-        global_const::TOTAL_HEAP_SIZE,
-        heap::HEAP,
+        allocator::heap_init,
         logger::{self, SerialPort},
     },
     log::*,
@@ -44,9 +42,6 @@ fn panic_handler(info: &core::panic::PanicInfo) -> ! {
     loop {}
 }
 
-#[global_allocator]
-static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator::new();
-
 /// Entry point for the UEFI application.
 ///
 /// Initializes logging, UEFI services, and attempts to start the hypervisor on all processors.
@@ -62,19 +57,17 @@ static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator::new();
 /// or `Status::ABORTED` if the hypervisor fails to install.
 #[entry]
 fn main(_image_handle: Handle, mut system_table: SystemTable<Boot>) -> Status {
-    // Initialize logging with the COM1 port and set the level filter.
-    logger::init(SerialPort::COM1, LevelFilter::Info);
-
-    info!("The Matrix is an illusion");
-
     unsafe {
-        // Initialize the global heap allocator.
-        GLOBAL_ALLOCATOR.init(HEAP.as_mut_ptr() as usize, TOTAL_HEAP_SIZE);
-
         // Initialize the stack allocator.
         init(&mut system_table);
+
+        // Initialize the global heap allocator.
+        heap_init();
     }
 
-    debug!("Heap size: {:#x}", TOTAL_HEAP_SIZE);
+    // Initialize logging with the COM1 port and set the level filter to Info.
+    logger::init(SerialPort::COM1, LevelFilter::Info);
+
+    info!("The Matrix is an illusion");
 
     let boot_services = system_table.boot_services();
diff --git a/uefi/src/setup.rs b/uefi/src/setup.rs
index b492287..7736c62 100644
--- a/uefi/src/setup.rs
+++ b/uefi/src/setup.rs
@@ -5,7 +5,7 @@
 use {
     alloc::boxed::Box,
     hypervisor::{
-        heap::box_zeroed,
+        allocator::box_zeroed,
         intel::{
             hooks::hook_manager::{HookManager, SHARED_HOOK_MANAGER},
             page::Page,
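
For orientation, the allocator introduced by this patch is driven from the UEFI entry path roughly as follows. This is a minimal illustrative sketch, not part of the patch: the `demo` function is hypothetical, while `heap_init`, `box_zeroed`, and `Page` are the items this commit wires up in `uefi/src/main.rs`, `setup.rs`, and `memory_manager.rs`.

```rust
// Illustrative sketch only -- assumes the `hypervisor` crate layout from this
// commit (allocator::heap_init, allocator::box_zeroed, intel::page::Page).
extern crate alloc;

use alloc::vec::Vec;
use hypervisor::{
    allocator::{box_zeroed, heap_init},
    intel::page::Page,
};

// Hypothetical caller standing in for the UEFI entry point.
unsafe fn demo() {
    // Must run once before the first allocation: it writes the initial
    // free-list links into the static `HEAP` behind #[global_allocator].
    heap_init();

    // Ordinary collections are now served by ListHeap::alloc/dealloc.
    let mut ranges: Vec<(usize, usize)> = Vec::with_capacity(128);
    ranges.push((0x1000, 0x2000));

    // Zero-initialized, heap-backed structures (e.g. EPT pages) come from box_zeroed.
    let page = box_zeroed::<Page>();
    drop(page);
}
```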