From d1cf53d1b92058505ebdcde7a098ef0d2fb656ac Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 00:32:01 +1200 Subject: [PATCH 01/87] Create allocator.rs --- uefi/src/allocator.rs | 217 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 217 insertions(+) create mode 100644 uefi/src/allocator.rs diff --git a/uefi/src/allocator.rs b/uefi/src/allocator.rs new file mode 100644 index 0000000..ebeceac --- /dev/null +++ b/uefi/src/allocator.rs @@ -0,0 +1,217 @@ +//! This module provides a global allocator using UEFI's memory allocation functions. +//! It tracks memory usage and ensures thread-safe operations. + +use { + core::{ + alloc::{GlobalAlloc, Layout}, + ffi::c_void, + ptr, + sync::atomic::{AtomicPtr, AtomicU32, AtomicUsize, Ordering}, + }, + uefi::{ + proto::loaded_image::LoadedImage, + table::{ + boot::{BootServices, MemoryType}, + Boot, SystemTable, + }, + }, +}; + +/// The size of the heap in bytes. +const HEAP_SIZE: usize = 0x10000; + +/// Reference to the system table, used to call the boot services pool memory +/// allocation functions. +static SYSTEM_TABLE: AtomicPtr = AtomicPtr::new(ptr::null_mut()); + +/// The memory type used for pool memory allocations. +static MEMORY_TYPE: AtomicU32 = AtomicU32::new(MemoryType::LOADER_DATA.0); + +/// A global allocator that uses UEFI's pool allocation functions and tracks memory usage. +pub struct GlobalAllocator { + /// Atomic counter to track used memory. + used_memory: AtomicUsize, +} + +impl GlobalAllocator { + /// Creates a new, uninitialized GlobalAllocator. + /// + /// # Returns + /// + /// A new instance of `GlobalAllocator`. + pub const fn new() -> Self { + Self { + used_memory: AtomicUsize::new(0), + } + } + + /// Initializes the allocator and sets the system table. + /// + /// # Safety + /// + /// This function must be called exactly once before any allocations are made. 
+ /// + /// # Arguments + /// + /// * `system_table` - A reference to the UEFI system table. + pub unsafe fn init(&self, system_table: &SystemTable) { + // Store the system table pointer for later use in allocation and deallocation. + SYSTEM_TABLE.store(system_table.as_ptr().cast_mut(), Ordering::Release); + + // Set the memory type based on the loaded image data type. + let boot_services = system_table.boot_services(); + if let Ok(loaded_image) = boot_services.open_protocol_exclusive::(boot_services.image_handle()) { + MEMORY_TYPE.store(loaded_image.data_type().0, Ordering::Release); + } + } + + /// Returns the amount of memory currently in use. + /// + /// # Returns + /// + /// The amount of memory currently in use, in bytes. + pub fn used(&self) -> usize { + self.used_memory.load(Ordering::SeqCst) + } + + /// Returns the amount of memory currently available. + /// + /// # Returns + /// + /// The amount of memory currently available, in bytes. + pub fn free(&self) -> usize { + HEAP_SIZE - self.used() + } + + /// Access the boot services. + /// + /// # Returns + /// + /// A reference to the boot services. + fn boot_services(&self) -> *const BootServices { + let ptr = SYSTEM_TABLE.load(Ordering::Acquire); + let system_table = unsafe { SystemTable::from_ptr(ptr) }.expect("The system table handle is not available"); + system_table.boot_services() + } +} + +/// Global allocator instance. +#[global_allocator] +static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator::new(); + +unsafe impl GlobalAlloc for GlobalAllocator { + /// Allocates memory using UEFI's pool allocation functions. + /// + /// # Arguments + /// + /// * `layout` - The layout of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the allocated memory. 
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let size = layout.size(); + let align = layout.align(); + let memory_type = MemoryType(MEMORY_TYPE.load(Ordering::Acquire)); + let boot_services = &*self.boot_services(); + + if align > 8 { + let full_alloc_ptr = if let Ok(ptr) = boot_services.allocate_pool(memory_type, size + align) { + ptr + } else { + return ptr::null_mut(); + }; + + let mut offset = full_alloc_ptr.align_offset(align); + if offset == 0 { + offset = align; + } + + let aligned_ptr = full_alloc_ptr.add(offset); + aligned_ptr.cast::<*mut u8>().sub(1).write(full_alloc_ptr); + self.used_memory.fetch_add(size, Ordering::SeqCst); + aligned_ptr + } else { + let alloc_ptr = boot_services.allocate_pool(memory_type, size).map(|ptr| ptr).unwrap_or(ptr::null_mut()); + if !alloc_ptr.is_null() { + self.used_memory.fetch_add(size, Ordering::SeqCst); + } + alloc_ptr + } + } + + /// Deallocates memory using UEFI's pool allocation functions. + /// + /// # Arguments + /// + /// * `ptr` - A pointer to the memory to be deallocated. + /// * `layout` - The layout of the memory to be deallocated. + unsafe fn dealloc(&self, mut ptr: *mut u8, layout: Layout) { + if layout.align() > 8 { + ptr = (ptr as *const *mut u8).sub(1).read(); + } + let boot_services = &*self.boot_services(); + boot_services.free_pool(ptr).unwrap(); + self.used_memory.fetch_sub(layout.size(), Ordering::SeqCst); + } + + /// Allocates zeroed memory using UEFI's pool allocation functions. + /// + /// # Arguments + /// + /// * `layout` - The layout of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the allocated and zeroed memory. + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + let ptr = self.alloc(layout); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, layout.size()); + } + ptr + } + + /// Reallocates memory using UEFI's pool allocation functions. + /// + /// # Arguments + /// + /// * `ptr` - A pointer to the memory to be reallocated. 
+ /// * `layout` - The layout of the memory to be reallocated. + /// * `new_size` - The new size of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the reallocated memory. + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + let new_ptr = self.alloc(Layout::from_size_align(new_size, layout.align()).unwrap()); + if !new_ptr.is_null() { + ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); + self.dealloc(ptr, layout); + } + new_ptr + } +} + +/// Initializes the global heap allocator with the UEFI system table. +/// +/// This function must be called before any memory allocation operations are performed. +/// +/// # Safety +/// +/// This function is unsafe because it must be called exactly once and must be called +/// before any allocations are made. +/// +/// # Arguments +/// +/// * `system_table` - A reference to the UEFI system table. +pub unsafe fn init_heap(system_table: &SystemTable) { + GLOBAL_ALLOCATOR.init(system_table); +} + +/// Notifies the allocator library that boot services are no longer available. +/// +/// This function must be called before exiting UEFI boot services. +pub fn exit_boot_services() { + SYSTEM_TABLE.store(ptr::null_mut(), Ordering::Release); +} From 9371194f1d697987895d353882b1e4f395b54b39 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 01:01:59 +1200 Subject: [PATCH 02/87] Update allocator.rs --- {uefi => hypervisor}/src/allocator.rs | 90 ++++++++++----------------- 1 file changed, 33 insertions(+), 57 deletions(-) rename {uefi => hypervisor}/src/allocator.rs (72%) diff --git a/uefi/src/allocator.rs b/hypervisor/src/allocator.rs similarity index 72% rename from uefi/src/allocator.rs rename to hypervisor/src/allocator.rs index ebeceac..f2fa867 100644 --- a/uefi/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -18,7 +18,7 @@ use { }; /// The size of the heap in bytes. 
-const HEAP_SIZE: usize = 0x10000; +const HEAP_SIZE: usize = 0x8000; /// Reference to the system table, used to call the boot services pool memory /// allocation functions. @@ -31,6 +31,8 @@ static MEMORY_TYPE: AtomicU32 = AtomicU32::new(MemoryType::LOADER_DATA.0); pub struct GlobalAllocator { /// Atomic counter to track used memory. used_memory: AtomicUsize, + /// Base address of the allocated heap. + heap_base_address: AtomicUsize, } impl GlobalAllocator { @@ -42,6 +44,7 @@ impl GlobalAllocator { pub const fn new() -> Self { Self { used_memory: AtomicUsize::new(0), + heap_base_address: AtomicUsize::new(0), } } @@ -63,6 +66,13 @@ impl GlobalAllocator { if let Ok(loaded_image) = boot_services.open_protocol_exclusive::(boot_services.image_handle()) { MEMORY_TYPE.store(loaded_image.data_type().0, Ordering::Release); } + + // Allocate the initial heap pool and set the base address. + let heap_base = boot_services + .allocate_pool(MemoryType::LOADER_DATA, HEAP_SIZE) + .expect("Failed to allocate heap pool") as usize; + + self.heap_base_address.store(heap_base, Ordering::Release); } /// Returns the amount of memory currently in use. @@ -74,13 +84,13 @@ impl GlobalAllocator { self.used_memory.load(Ordering::SeqCst) } - /// Returns the amount of memory currently available. + /// Returns the base address of the heap. /// /// # Returns /// - /// The amount of memory currently available, in bytes. - pub fn free(&self) -> usize { - HEAP_SIZE - self.used() + /// The base address of the heap. + pub fn heap_base(&self) -> usize { + self.heap_base_address.load(Ordering::Acquire) } /// Access the boot services. @@ -97,7 +107,7 @@ impl GlobalAllocator { /// Global allocator instance. #[global_allocator] -static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator::new(); +pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator::new(); unsafe impl GlobalAlloc for GlobalAllocator { /// Allocates memory using UEFI's pool allocation functions. 
@@ -115,29 +125,32 @@ unsafe impl GlobalAlloc for GlobalAllocator { let memory_type = MemoryType(MEMORY_TYPE.load(Ordering::Acquire)); let boot_services = &*self.boot_services(); - if align > 8 { - let full_alloc_ptr = if let Ok(ptr) = boot_services.allocate_pool(memory_type, size + align) { - ptr - } else { - return ptr::null_mut(); - }; - + let ptr = if align > 8 { + let full_alloc_ptr = boot_services + .allocate_pool(memory_type, size + align) + .ok() + .map(|ptr| ptr) + .unwrap_or(ptr::null_mut()); let mut offset = full_alloc_ptr.align_offset(align); if offset == 0 { offset = align; } - let aligned_ptr = full_alloc_ptr.add(offset); aligned_ptr.cast::<*mut u8>().sub(1).write(full_alloc_ptr); - self.used_memory.fetch_add(size, Ordering::SeqCst); aligned_ptr } else { - let alloc_ptr = boot_services.allocate_pool(memory_type, size).map(|ptr| ptr).unwrap_or(ptr::null_mut()); - if !alloc_ptr.is_null() { - self.used_memory.fetch_add(size, Ordering::SeqCst); - } - alloc_ptr + boot_services + .allocate_pool(memory_type, size) + .ok() + .map(|ptr| ptr) + .unwrap_or(ptr::null_mut()) + }; + + if !ptr.is_null() { + self.used_memory.fetch_add(size, Ordering::SeqCst); } + + ptr } /// Deallocates memory using UEFI's pool allocation functions. @@ -154,43 +167,6 @@ unsafe impl GlobalAlloc for GlobalAllocator { boot_services.free_pool(ptr).unwrap(); self.used_memory.fetch_sub(layout.size(), Ordering::SeqCst); } - - /// Allocates zeroed memory using UEFI's pool allocation functions. - /// - /// # Arguments - /// - /// * `layout` - The layout of the memory to be allocated. - /// - /// # Returns - /// - /// A pointer to the allocated and zeroed memory. - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - let ptr = self.alloc(layout); - if !ptr.is_null() { - ptr::write_bytes(ptr, 0, layout.size()); - } - ptr - } - - /// Reallocates memory using UEFI's pool allocation functions. - /// - /// # Arguments - /// - /// * `ptr` - A pointer to the memory to be reallocated. 
- /// * `layout` - The layout of the memory to be reallocated. - /// * `new_size` - The new size of the memory to be allocated. - /// - /// # Returns - /// - /// A pointer to the reallocated memory. - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - let new_ptr = self.alloc(Layout::from_size_align(new_size, layout.align()).unwrap()); - if !new_ptr.is_null() { - ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); - self.dealloc(ptr, layout); - } - new_ptr - } } /// Initializes the global heap allocator with the UEFI system table. From 9af14ed928a37f76cf2f003f62fd6a714580e675 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 01:41:49 +1200 Subject: [PATCH 03/87] Update allocator.rs --- hypervisor/src/allocator.rs | 98 ++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 55 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index f2fa867..5981739 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -10,15 +10,12 @@ use { }, uefi::{ proto::loaded_image::LoadedImage, - table::{ - boot::{BootServices, MemoryType}, - Boot, SystemTable, - }, + table::{boot::MemoryType, Boot, SystemTable}, }, }; /// The size of the heap in bytes. -const HEAP_SIZE: usize = 0x8000; +const HEAP_SIZE: usize = 0x10000; /// Reference to the system table, used to call the boot services pool memory /// allocation functions. @@ -32,7 +29,9 @@ pub struct GlobalAllocator { /// Atomic counter to track used memory. used_memory: AtomicUsize, /// Base address of the allocated heap. - heap_base_address: AtomicUsize, + heap_base_address: AtomicPtr, + /// Size of the allocated heap. 
+ heap_size: usize, } impl GlobalAllocator { @@ -44,7 +43,8 @@ impl GlobalAllocator { pub const fn new() -> Self { Self { used_memory: AtomicUsize::new(0), - heap_base_address: AtomicUsize::new(0), + heap_base_address: AtomicPtr::new(ptr::null_mut()), + heap_size: HEAP_SIZE, } } @@ -69,8 +69,8 @@ impl GlobalAllocator { // Allocate the initial heap pool and set the base address. let heap_base = boot_services - .allocate_pool(MemoryType::LOADER_DATA, HEAP_SIZE) - .expect("Failed to allocate heap pool") as usize; + .allocate_pool(MemoryType::LOADER_DATA, self.heap_size) + .expect("Failed to allocate heap pool"); self.heap_base_address.store(heap_base, Ordering::Release); } @@ -89,20 +89,9 @@ impl GlobalAllocator { /// # Returns /// /// The base address of the heap. - pub fn heap_base(&self) -> usize { + pub fn heap_base(&self) -> *mut u8 { self.heap_base_address.load(Ordering::Acquire) } - - /// Access the boot services. - /// - /// # Returns - /// - /// A reference to the boot services. - fn boot_services(&self) -> *const BootServices { - let ptr = SYSTEM_TABLE.load(Ordering::Acquire); - let system_table = unsafe { SystemTable::from_ptr(ptr) }.expect("The system table handle is not available"); - system_table.boot_services() - } } /// Global allocator instance. @@ -110,7 +99,7 @@ impl GlobalAllocator { pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator::new(); unsafe impl GlobalAlloc for GlobalAllocator { - /// Allocates memory using UEFI's pool allocation functions. + /// Allocates memory from the pre-allocated heap. 
/// /// # Arguments /// @@ -122,51 +111,50 @@ unsafe impl GlobalAlloc for GlobalAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { let size = layout.size(); let align = layout.align(); - let memory_type = MemoryType(MEMORY_TYPE.load(Ordering::Acquire)); - let boot_services = &*self.boot_services(); - - let ptr = if align > 8 { - let full_alloc_ptr = boot_services - .allocate_pool(memory_type, size + align) - .ok() - .map(|ptr| ptr) - .unwrap_or(ptr::null_mut()); - let mut offset = full_alloc_ptr.align_offset(align); - if offset == 0 { - offset = align; - } - let aligned_ptr = full_alloc_ptr.add(offset); - aligned_ptr.cast::<*mut u8>().sub(1).write(full_alloc_ptr); - aligned_ptr - } else { - boot_services - .allocate_pool(memory_type, size) - .ok() - .map(|ptr| ptr) - .unwrap_or(ptr::null_mut()) - }; - if !ptr.is_null() { - self.used_memory.fetch_add(size, Ordering::SeqCst); + // Ensure the alignment and size fit within the heap bounds + let used = self.used(); + let start = self.heap_base().add(used); + let aligned_start = start.add(start.align_offset(align)); + let end = aligned_start.add(size); + + if end > self.heap_base().add(self.heap_size) { + return ptr::null_mut(); // Out of memory } - ptr + self.used_memory.fetch_add(end as usize - start as usize, Ordering::SeqCst); + + aligned_start } - /// Deallocates memory using UEFI's pool allocation functions. + /// Deallocates memory within the pre-allocated heap. /// /// # Arguments /// /// * `ptr` - A pointer to the memory to be deallocated. /// * `layout` - The layout of the memory to be deallocated. - unsafe fn dealloc(&self, mut ptr: *mut u8, layout: Layout) { - if layout.align() > 8 { - ptr = (ptr as *const *mut u8).sub(1).read(); - } - let boot_services = &*self.boot_services(); - boot_services.free_pool(ptr).unwrap(); + unsafe fn dealloc(&self, _ptr: *mut u8, layout: Layout) { + // Note: In a simple bump allocator, deallocation is often a no-op. 
+ // You might want to implement more complex free logic if needed. self.used_memory.fetch_sub(layout.size(), Ordering::SeqCst); } + + /// Allocates zeroed memory from the pre-allocated heap. + /// + /// # Arguments + /// + /// * `layout` - The layout of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the allocated and zeroed memory. + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + let ptr = self.alloc(layout); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, layout.size()); + } + ptr + } } /// Initializes the global heap allocator with the UEFI system table. From 51eb7c8e527bba3cbaa7f68871f50569de33c65a Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 01:59:02 +1200 Subject: [PATCH 04/87] Update allocator.rs --- hypervisor/src/allocator.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 5981739..f26bf2a 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -15,7 +15,7 @@ use { }; /// The size of the heap in bytes. -const HEAP_SIZE: usize = 0x10000; +const HEAP_SIZE: usize = 0x800000; // 4MB /// Reference to the system table, used to call the boot services pool memory /// allocation functions. 
@@ -111,18 +111,22 @@ unsafe impl GlobalAlloc for GlobalAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { let size = layout.size(); let align = layout.align(); + log::debug!("Requested allocation: size = {:#x}, align = {:#x}", size, align); // Ensure the alignment and size fit within the heap bounds let used = self.used(); + log::debug!("Current used memory: {:#x}", used); let start = self.heap_base().add(used); let aligned_start = start.add(start.align_offset(align)); let end = aligned_start.add(size); if end > self.heap_base().add(self.heap_size) { + log::error!("Out of memory: requested end = {:#x}, heap end = {:#x}", end as usize, self.heap_base().add(self.heap_size) as usize); return ptr::null_mut(); // Out of memory } self.used_memory.fetch_add(end as usize - start as usize, Ordering::SeqCst); + log::debug!("Allocated memory: start = {:#x}, end = {:#x}", start as usize, end as usize); aligned_start } From e80f7d40f608a857f9e343b69081181680495c7c Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 01:59:21 +1200 Subject: [PATCH 05/87] Update Cargo.toml --- hypervisor/Cargo.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hypervisor/Cargo.toml b/hypervisor/Cargo.toml index e157e92..9fed21e 100644 --- a/hypervisor/Cargo.toml +++ b/hypervisor/Cargo.toml @@ -12,6 +12,8 @@ path = "src/lib.rs" [dependencies] x86 = "0.52.0" # https://crates.io/crates/x86 x86_64 = "0.15.0" # https://crates.io/crates/x86_64 +uefi = { version = "0.28.0", features = ["alloc"] } # https://crates.io/crates/uefi +#uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services thiserror-no-std = "2.0.2" # https://crates.io/crates/thiserror-no-std bitfield = "0.15.0" # https://crates.io/crates/bitfield bit_field = "0.10.2" # https://crates.io/crates/bit_field From b67be747ce814000d4f35074c8a8e3619255d863 Mon Sep 17 00:00:00 2001 From: memN0ps 
<89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 01:59:23 +1200 Subject: [PATCH 06/87] Update Cargo.toml --- uefi/Cargo.toml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/uefi/Cargo.toml b/uefi/Cargo.toml index f9daf7d..d455d71 100644 --- a/uefi/Cargo.toml +++ b/uefi/Cargo.toml @@ -10,10 +10,11 @@ name = "illusion" path = "src/main.rs" [dependencies] -uefi = { version = "0.28.0", features = ["global_allocator", "alloc"] } # https://crates.io/crates/uefi -uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services +uefi = { version = "0.28.0", features = ["alloc"] } # https://crates.io/crates/uefi +#uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services log = { version = "0.4.20", default-features = false } # https://crates.io/crates/log once_cell = "1.19.0" # https://crates.io/crates/once_cell spin = "0.9" # https://crates.io/crates/spin com_logger = "0.1.1" # https://crates.io/crates/com_logger +heapless = "0.8.0" # https://crates.io/crates/heapless hypervisor = { path = "../hypervisor" } \ No newline at end of file From b7aa062680b21340674d12b8932643a19ddfd02f Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:02:44 +1200 Subject: [PATCH 07/87] Update allocator.rs --- hypervisor/src/allocator.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index f26bf2a..e939c99 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -2,6 +2,10 @@ //! It tracks memory usage and ensures thread-safe operations. 
use { + alloc::{ + alloc::{alloc_zeroed, handle_alloc_error}, + boxed::Box, + }, core::{ alloc::{GlobalAlloc, Layout}, ffi::c_void, @@ -183,3 +187,26 @@ pub unsafe fn init_heap(system_table: &SystemTable) { pub fn exit_boot_services() { SYSTEM_TABLE.store(ptr::null_mut(), Ordering::Release); } + +/// Allocates and zeros memory for a given type, returning a boxed instance. +/// +/// # Safety +/// +/// This function allocates memory and initializes it to zero. It must be called +/// in a safe context where allocation errors and uninitialized memory access are handled. +/// +/// # Returns +/// +/// Returns a `Box` pointing to the zero-initialized memory of type `T`. +/// +/// # Panics +/// +/// Panics if memory allocation fails. +pub unsafe fn box_zeroed() -> Box { + let layout = Layout::new::(); + let ptr = unsafe { alloc_zeroed(layout) }.cast::(); + if ptr.is_null() { + handle_alloc_error(layout); + } + unsafe { Box::from_raw(ptr) } +} From 72b30daa19d8dde75edb3578b9e723079a74448d Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:02:46 +1200 Subject: [PATCH 08/87] Delete allocate.rs --- hypervisor/src/allocate.rs | 102 ------------------------------------- 1 file changed, 102 deletions(-) delete mode 100644 hypervisor/src/allocate.rs diff --git a/hypervisor/src/allocate.rs b/hypervisor/src/allocate.rs deleted file mode 100644 index f82d6c1..0000000 --- a/hypervisor/src/allocate.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! Provides utilities for stack allocation and zero-initialized memory in hypervisor contexts. -//! Supports dynamic stack management and safe memory initialization for virtualization. -//! Tracks allocated memory regions for enhanced stealth capabilities. - -use { - crate::intel::page::Page, - alloc::{ - alloc::{alloc_zeroed, handle_alloc_error}, - boxed::Box, - collections::BTreeSet, - }, - core::alloc::Layout, - spin::Mutex, -}; - -/// A global set to keep track of allocated memory regions. 
-pub static ALLOCATED_MEMORY: Mutex> = Mutex::new(BTreeSet::new()); - -/// Records an allocated memory region. -/// -/// # Arguments -/// -/// * `base` - The base address of the allocated memory region. -/// * `size` - The size of the allocated memory region. -fn record_allocation(base: u64, size: u64) { - let mut allocated_memory = ALLOCATED_MEMORY.lock(); - allocated_memory.insert((base, base + size)); -} - -/// Allocates stack space and returns the base address of the stack. -/// -/// # Arguments -/// -/// * `n` - The number of pages to allocate. -/// -/// # Returns -/// -/// The base address of the allocated stack space. -pub fn allocate_stack_space(n: usize) -> u64 { - let layout = Layout::array::(n).unwrap(); - let stack = unsafe { alloc_zeroed(layout) }; - if stack.is_null() { - handle_alloc_error(layout); - } - let stack_base = stack as u64 + layout.size() as u64 - 0x10; - record_allocation(stack as u64, layout.size() as u64); - stack_base -} - -/// Allocates and zeros memory for a given type, returning a boxed instance. -/// -/// # Safety -/// -/// This function allocates memory and initializes it to zero. It must be called -/// in a safe context where allocation errors and uninitialized memory access are handled. -/// -/// # Returns -/// -/// Returns a `Box` pointing to the zero-initialized memory of type `T`. -/// -/// # Panics -/// -/// Panics if memory allocation fails. -pub unsafe fn box_zeroed() -> Box { - let layout = Layout::new::(); - let ptr = unsafe { alloc_zeroed(layout) }.cast::(); - if ptr.is_null() { - handle_alloc_error(layout); - } - let base = ptr as u64; - let size = layout.size() as u64; - record_allocation(base, size); - unsafe { Box::from_raw(ptr) } -} - -/// Creates a dummy page filled with a specific byte value. -/// -/// # Arguments -/// -/// * `fill_byte` - The byte value to fill the page with. -/// -/// # Returns -/// -/// The physical address of the dummy page. 
-pub fn create_dummy_page(fill_byte: u8) -> u64 { - let mut dummy_page = unsafe { box_zeroed::() }; - dummy_page.0.iter_mut().for_each(|byte| *byte = fill_byte); - let dummy_page_pa = Box::into_raw(dummy_page) as u64; - dummy_page_pa -} - -/// Records an image allocation in the global memory set. -/// This function is useful for tracking allocated memory regions for enhanced stealth capabilities. -/// -/// # Arguments -/// -/// * `base` - The base address of the allocated memory region. -/// * `size` - The size of the allocated memory region. -pub fn record_image_allocation(base: u64, size: u64) { - let mut allocated_memory = ALLOCATED_MEMORY.lock(); - allocated_memory.insert((base, base + size)); -} From 0f24b263fa2be0bf3fa2aed8e69206b4dee0461b Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:03:02 +1200 Subject: [PATCH 09/87] Update memory_manager.rs --- hypervisor/src/intel/hooks/memory_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/intel/hooks/memory_manager.rs b/hypervisor/src/intel/hooks/memory_manager.rs index c1ea193..f25a5d5 100644 --- a/hypervisor/src/intel/hooks/memory_manager.rs +++ b/hypervisor/src/intel/hooks/memory_manager.rs @@ -4,7 +4,7 @@ use { crate::{ - allocate::box_zeroed, + allocator::box_zeroed, error::HypervisorError, intel::{ept::Pt, hooks::hook_manager::EptHookType, page::Page}, }, From 00ab2c0bc41f689dd3c62bf71ccac2ed83c1300e Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:03:07 +1200 Subject: [PATCH 10/87] Update lib.rs --- hypervisor/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs index eb4d216..e632d3e 100644 --- a/hypervisor/src/lib.rs +++ b/hypervisor/src/lib.rs @@ -13,7 +13,7 @@ extern crate alloc; extern crate static_assertions; -pub mod allocate; +pub mod allocator; pub mod error; pub mod 
intel; pub mod logger; From 8c1a5a44eafe45a95ac6e6dac6dd03c89bf92f97 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:03:19 +1200 Subject: [PATCH 11/87] Update main.rs --- uefi/src/main.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/uefi/src/main.rs b/uefi/src/main.rs index 7304e76..7c6f405 100644 --- a/uefi/src/main.rs +++ b/uefi/src/main.rs @@ -11,7 +11,10 @@ extern crate alloc; use { crate::{processor::start_hypervisor_on_all_processors, relocation::zap_relocations}, - hypervisor::logger::{self, SerialPort}, + hypervisor::{ + allocator::init_heap, + logger::{self, SerialPort}, + }, log::*, uefi::prelude::*, }; @@ -53,12 +56,17 @@ fn panic_handler(info: &core::panic::PanicInfo) -> ! { /// The status of the application execution. Returns `Status::SUCCESS` on successful execution, /// or `Status::ABORTED` if the hypervisor fails to install. #[entry] -fn main(_image_handle: Handle, mut system_table: SystemTable) -> Status { - // Initialize logging with the COM2 port and set the level filter to Trace. - logger::init(SerialPort::COM1, LevelFilter::Debug); +fn main(_image_handle: Handle, system_table: SystemTable) -> Status { + // Initialize the allocator BEFORE it's used. + // + // This unsafe block is necessary because the `init_heap` function must be called exactly once + // before any allocations are made. It initializes the heap allocator with the system table. + unsafe { + init_heap(&system_table); + } - // Initialize UEFI services. - uefi::helpers::init(&mut system_table).unwrap(); + // Initialize logging with the COM2 port and set the level filter to Debug. 
+ logger::init(SerialPort::COM1, LevelFilter::Trace); info!("The Matrix is an illusion"); From 32c4265eb63dbf336b7bbf8dd043efb14bd9a0a2 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:04:02 +1200 Subject: [PATCH 12/87] Update virtualize.rs --- uefi/src/virtualize.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs index c14f4d2..869d9b1 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -4,8 +4,12 @@ //! Credits to Satoshi Tanda: https://github.com/tandasat/Hello-VT-rp/blob/main/hypervisor/src/switch_stack.rs use { - core::arch::global_asm, - hypervisor::{allocate::allocate_stack_space, intel::capture::GuestRegisters, vmm::start_hypervisor}, + alloc::alloc::handle_alloc_error, + core::{alloc::Layout, arch::global_asm}, + hypervisor::{ + intel::{capture::GuestRegisters, page::Page}, + vmm::start_hypervisor, + }, log::debug, }; @@ -16,7 +20,14 @@ use { /// * `guest_registers` - The guest registers to use for the hypervisor. pub fn virtualize_system(guest_registers: &GuestRegisters) -> ! 
{ debug!("Allocating stack space for host"); - let host_stack = allocate_stack_space(0x3000); + + let layout = Layout::array::(0x10).unwrap(); + let stack = unsafe { alloc::alloc::alloc_zeroed(layout) }; + if stack.is_null() { + handle_alloc_error(layout); + } + let host_stack = stack as u64 + layout.size() as u64 - 0x10; + debug!("Stack range: {:#x?}", stack as u64..host_stack); unsafe { switch_stack(guest_registers, start_hypervisor as usize, host_stack) }; } From 38172e43339dd019a100a2a6de289a831d786bf6 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:04:05 +1200 Subject: [PATCH 13/87] Update relocation.rs --- uefi/src/relocation.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/uefi/src/relocation.rs b/uefi/src/relocation.rs index c5d5db9..bb34375 100644 --- a/uefi/src/relocation.rs +++ b/uefi/src/relocation.rs @@ -4,7 +4,6 @@ //! Credits Satoshi Tanda: https://github.com/tandasat/Hello-VT-rp/blob/main/hypervisor/src/switch_stack.rs use { - hypervisor::allocate::record_image_allocation, log::debug, uefi::{prelude::BootServices, proto::loaded_image::LoadedImage}, }; @@ -30,7 +29,6 @@ pub fn zap_relocations(boot_service: &BootServices) -> uefi::Result<()> { let (image_base, image_size) = loaded_image.info(); let image_base = image_base as usize; let image_range = image_base..image_base + image_size as usize; - record_image_allocation(image_base as u64, image_size); // Log the image base address range for debugging purposes. 
debug!("Image base: {:#x?}", image_range); From 4639d4252bcf53c6f11b8e73cfb63564303200bd Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:04:08 +1200 Subject: [PATCH 14/87] Update vm.rs --- hypervisor/src/intel/vm.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index 6ac4dba..a863d37 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -7,7 +7,7 @@ use { crate::{ - allocate::{box_zeroed, create_dummy_page}, + allocator::box_zeroed, error::HypervisorError, intel::{ bitmap::{MsrAccessType, MsrBitmap, MsrOperation}, @@ -15,6 +15,7 @@ use { descriptor::Descriptors, ept::Ept, hooks::hook_manager::HookManager, + page::Page, paging::PageTables, support::{rdmsr, vmclear, vmptrld, vmread}, vmcs::Vmcs, @@ -112,7 +113,8 @@ impl Vm { let hook_manager = HookManager::new()?; trace!("Creating dummy page filled with 0xffs"); - let dummy_page_pa = create_dummy_page(0xff); + let dummy_page = unsafe { box_zeroed::() }; + let dummy_page_pa = Box::into_raw(dummy_page) as u64; trace!("VM created"); From f52d28f2cf31d3fe96a2764cad2cdca421d46a1e Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:04:11 +1200 Subject: [PATCH 15/87] Update hook_manager.rs --- hypervisor/src/intel/hooks/hook_manager.rs | 29 ++++++++++------------ 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index 7e30e2d..2ff9f7c 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -1,6 +1,6 @@ use { crate::{ - allocate::ALLOCATED_MEMORY, + allocator::GLOBAL_ALLOCATOR, error::HypervisorError, intel::{ addresses::PhysicalAddress, @@ -15,7 +15,7 @@ use { }, windows::kernel::KernelHook, }, - alloc::{boxed::Box, vec::Vec}, + alloc::boxed::Box, 
core::intrinsics::copy_nonoverlapping, log::*, x86::bits64::paging::{PAddr, BASE_PAGE_SIZE}, @@ -81,32 +81,29 @@ impl HookManager { /// Hides the hypervisor memory from the guest by installing EPT hooks on all allocated memory regions. /// - /// This function iterates through the `ALLOCATED_MEMORY` set and calls `ept_hide_hypervisor_memory` + /// This function iterates through the used memory in the global allocator and calls `ept_hide_hypervisor_memory` /// for each page to split the 2MB pages into 4KB pages and fill the shadow page with a specified value. /// It then swaps the guest page with the shadow page and sets the desired permissions. /// /// # Arguments /// /// * `vm` - The virtual machine instance of the hypervisor. - /// * `dummy_page_pa` - The physical address of the dummy page. /// * `page_permissions` - The desired permissions for the hooked page. /// /// # Returns /// - /// * Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise. + /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise. pub fn hide_hypervisor_memory(vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { - let allocated_memory: Vec<(u64, u64)> = { - let allocated_memory = ALLOCATED_MEMORY.lock(); - allocated_memory.iter().copied().collect() - }; - - debug!("Allocated memory ranges:"); - for &(base, end) in &allocated_memory { - debug!("Memory range: {:#x} - {:#x}", base, end); - } + // Get the used memory from the global allocator. + let used_memory = GLOBAL_ALLOCATOR.used(); + + // Get the base address of the heap. + let heap_base_address = GLOBAL_ALLOCATOR.heap_base(); - for &(base, _end) in &allocated_memory { - HookManager::ept_hide_hypervisor_memory(vm, base, page_permissions)?; + // Iterate through the used memory and hide each page. 
+ for offset in (0..used_memory).step_by(4096) { + let guest_page_pa = unsafe { heap_base_address.add(offset) }; + HookManager::ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa as usize).align_down_to_base_page().as_u64(), page_permissions)?; } Ok(()) From 4e72045419241d0ee7820d95690d378cf3ec6c73 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:12:41 +1200 Subject: [PATCH 16/87] Update allocator.rs --- hypervisor/src/allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index e939c99..2b911b6 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -19,7 +19,7 @@ use { }; /// The size of the heap in bytes. -const HEAP_SIZE: usize = 0x800000; // 4MB +const HEAP_SIZE: usize = 0x800000; // 8MB /// Reference to the system table, used to call the boot services pool memory /// allocation functions. From a128bf91a9b65def3dae8df6e4d543bea13326d6 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:18:42 +1200 Subject: [PATCH 17/87] Update hook_manager.rs --- hypervisor/src/intel/hooks/hook_manager.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index 2ff9f7c..1830d27 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -123,19 +123,19 @@ impl HookManager { /// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise. 
fn ept_hide_hypervisor_memory(vm: &mut Vm, guest_page_pa: u64, page_permissions: AccessType) -> Result<(), HypervisorError> { let guest_page_pa = PAddr::from(guest_page_pa).align_down_to_base_page(); - debug!("Guest page PA: {:#x}", guest_page_pa.as_u64()); + trace!("Guest page PA: {:#x}", guest_page_pa.as_u64()); let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); - debug!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); + trace!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); let dummy_page_pa = vm.dummy_page_pa; trace!("Dummy page PA: {:#x}", dummy_page_pa); - debug!("Mapping large page"); + trace!("Mapping large page"); // Map the large page to the pre-allocated page table, if it hasn't been mapped already. vm.hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; - debug!("Filling shadow page with 0xff"); + trace!("Filling shadow page with 0xff"); Self::unsafe_fill_shadow_page(PAddr::from(dummy_page_pa), 0xff); let pre_alloc_pt = vm @@ -146,18 +146,18 @@ impl HookManager { // Check if a guest page has already been split. 
if vm.primary_ept.is_large_page(guest_page_pa.as_u64()) { - debug!("Splitting 2MB page to 4KB pages for Primary EPT: {:#x}", guest_large_page_pa); + trace!("Splitting 2MB page to 4KB pages for Primary EPT: {:#x}", guest_large_page_pa); vm.primary_ept.split_2mb_to_4kb(guest_large_page_pa.as_u64(), pre_alloc_pt)?; } - debug!("Swapping guest page: {:#x} with dummy page: {:#x}", guest_page_pa.as_u64(), dummy_page_pa); + trace!("Swapping guest page: {:#x} with dummy page: {:#x}", guest_page_pa.as_u64(), dummy_page_pa); vm.primary_ept .swap_page(guest_page_pa.as_u64(), dummy_page_pa, page_permissions, pre_alloc_pt)?; invept_all_contexts(); invvpid_all_contexts(); - debug!("EPT hide hypervisor memory completed successfully"); + trace!("EPT hide hypervisor memory completed successfully"); Ok(()) } From 17a5903b35ed18231ac996fc09274cacd2b0bd67 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:25:00 +1200 Subject: [PATCH 18/87] Update bitmap.rs --- hypervisor/src/intel/bitmap.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/hypervisor/src/intel/bitmap.rs b/hypervisor/src/intel/bitmap.rs index 6a55764..93fa96e 100644 --- a/hypervisor/src/intel/bitmap.rs +++ b/hypervisor/src/intel/bitmap.rs @@ -47,28 +47,6 @@ pub struct MsrBitmap { } impl MsrBitmap { - /// Creates a new MSR bitmap with all bits cleared. - /// - /// # Returns - /// - /// * A `Result` indicating the success or failure of the setup process. - pub fn new() -> Box { - log::trace!("Setting up MSR Bitmap"); - - let _instance = Self { - read_low_msrs: [0; 0x400], - read_high_msrs: [0; 0x400], - write_low_msrs: [0; 0x400], - write_high_msrs: [0; 0x400], - }; - - let msr_bitmap = Box::new(_instance); - - log::trace!("MSR Bitmap setup successfully!"); - - msr_bitmap - } - /// Modifies the interception for a specific MSR based on the specified operation and access type. 
/// /// # Arguments From 369572dbdac8b5577de2de7add43486d4204fb0c Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:25:02 +1200 Subject: [PATCH 19/87] Update vm.rs --- hypervisor/src/intel/vm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index a863d37..b5bd3cf 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -95,7 +95,7 @@ impl Vm { host_paging.build_identity(); trace!("Allocating MSR Bitmap"); - let mut msr_bitmap = MsrBitmap::new(); + let mut msr_bitmap = unsafe { box_zeroed::() }; trace!("Allocating Primary EPT"); let mut primary_ept = unsafe { box_zeroed::() }; From 0d4405f3b6ed2846e640dd8e67facdacc9d8ff19 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:36:11 +1200 Subject: [PATCH 20/87] Update bitmap.rs --- hypervisor/src/intel/bitmap.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/intel/bitmap.rs b/hypervisor/src/intel/bitmap.rs index 93fa96e..721c548 100644 --- a/hypervisor/src/intel/bitmap.rs +++ b/hypervisor/src/intel/bitmap.rs @@ -1,4 +1,4 @@ -use {alloc::boxed::Box, bitfield::BitMut}; +use bitfield::BitMut; /// Enum representing the type of MSR access. 
/// From 05aec50ad78bc27c705dc8b53de9a09dd133dd55 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:36:14 +1200 Subject: [PATCH 21/87] Update mod.rs --- hypervisor/src/intel/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/hypervisor/src/intel/mod.rs b/hypervisor/src/intel/mod.rs index 726c425..ecf58bf 100644 --- a/hypervisor/src/intel/mod.rs +++ b/hypervisor/src/intel/mod.rs @@ -19,5 +19,4 @@ pub mod vmcs; pub mod vmerror; pub mod vmexit; pub mod vmlaunch; -pub mod vmx; pub mod vmxon; From 6b2248cb45e8907cf352d029d7190b8018009789 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:36:18 +1200 Subject: [PATCH 22/87] Update vm.rs --- hypervisor/src/intel/vm.rs | 79 +++++++++++++++++++++++++++++++++++--- 1 file changed, 74 insertions(+), 5 deletions(-) diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index b5bd3cf..a1e41ef 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -17,16 +17,21 @@ use { hooks::hook_manager::HookManager, page::Page, paging::PageTables, - support::{rdmsr, vmclear, vmptrld, vmread}, + support::{rdmsr, vmclear, vmptrld, vmread, vmxon}, vmcs::Vmcs, vmerror::{VmInstructionError, VmxBasicExitReason}, vmlaunch::launch_vm, + vmxon::Vmxon, }, }, alloc::boxed::Box, bit_field::BitField, log::*, - x86::{bits64::rflags::RFlags, vmx::vmcs}, + x86::{ + bits64::{paging::BASE_PAGE_SIZE, rflags::RFlags}, + msr, + vmx::vmcs, + }, }; /// Represents a Virtual Machine (VM) instance, encapsulating its state and control mechanisms. @@ -35,6 +40,9 @@ use { /// It holds the VMCS region, guest and host descriptor tables, paging information, MSR bitmaps, /// and the state of guest registers. Additionally, it tracks whether the VM has been launched. pub struct Vm { + /// The VMXON (Virtual Machine Extensions On) region for the VM. 
+ pub vmxon_region: Box, + /// The VMCS (Virtual Machine Control Structure) for the VM. pub vmcs_region: Box, @@ -85,8 +93,12 @@ impl Vm { /// any part of the setup fails. pub fn new(guest_registers: &GuestRegisters) -> Result { trace!("Creating VM"); - let mut vmcs_region = unsafe { box_zeroed::() }; - vmcs_region.revision_id = rdmsr(x86::msr::IA32_VMX_BASIC) as u32; + + trace!("Allocating VMXON region"); + let vmxon_region = unsafe { box_zeroed::() }; + + trace!("Allocating VMCS region"); + let vmcs_region = unsafe { box_zeroed::() }; trace!("Allocating Memory for Host Paging"); let mut host_paging = unsafe { box_zeroed::() }; @@ -107,7 +119,7 @@ impl Vm { let primary_eptp = primary_ept.create_eptp_with_wb_and_4lvl_walk()?; trace!("Modifying MSR interception for LSTAR MSR write access"); - msr_bitmap.modify_msr_interception(x86::msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook); + msr_bitmap.modify_msr_interception(msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook); trace!("Creating EPT hook manager"); let hook_manager = HookManager::new()?; @@ -119,6 +131,7 @@ impl Vm { trace!("VM created"); Ok(Self { + vmxon_region, vmcs_region, host_paging, hook_manager, @@ -133,6 +146,61 @@ impl Vm { }) } + /// Activates the VMXON region to enable VMX operation. + /// + /// Sets up the VMXON region and executes the VMXON instruction. This involves configuring control registers, + /// adjusting the IA32_FEATURE_CONTROL MSR, and validating the VMXON region's revision ID to ensure the CPU is ready + /// for VMX operation mode. + /// + /// # Returns + /// + /// Returns `Ok(())` on successful activation, or an `Err(HypervisorError)` if any step in the activation process fails. 
+ pub fn activate_vmxon(&mut self) -> Result<(), HypervisorError> { + trace!("Setting up VMXON region"); + self.vmxon_region.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; + self.vmxon_region.data = [0; BASE_PAGE_SIZE - 4]; + + self.setup_vmxon()?; + trace!("VMXON region setup successfully!"); + + trace!("Executing VMXON instruction"); + vmxon(&mut self.vmxon_region as *const _ as _); + trace!("VMXON executed successfully!"); + + Ok(()) + } + + /// Prepares the system for VMX operation by configuring necessary control registers and MSRs. + /// + /// Ensures that the system meets all prerequisites for VMX operation as defined by Intel's specifications. + /// This includes enabling VMX operation through control register modifications, setting the lock bit in + /// IA32_FEATURE_CONTROL MSR, and adjusting mandatory CR0 and CR4 bits. + /// + /// # Returns + /// + /// Returns `Ok(())` if all configurations are successfully applied, or an `Err(HypervisorError)` if adjustments fail. + fn setup_vmxon(&mut self) -> Result<(), HypervisorError> { + trace!("Enabling Virtual Machine Extensions (VMX)"); + Vmxon::enable_vmx_operation(); + trace!("VMX enabled"); + + trace!("Adjusting IA32_FEATURE_CONTROL MSR"); + Vmxon::adjust_feature_control_msr()?; + trace!("IA32_FEATURE_CONTROL MSR adjusted"); + + trace!("Setting CR0 bits"); + Vmxon::set_cr0_bits(); + trace!("CR0 bits set"); + + trace!("Setting CR4 bits"); + Vmxon::set_cr4_bits(); + trace!("CR4 bits set"); + + self.vmxon_region.revision_id.set_bit(31, false); + + Ok(()) + } + /// Activates the VMCS region for the VM, preparing it for execution. /// /// Clears and loads the VMCS region, setting it as the current VMCS for VMX operations. @@ -143,6 +211,7 @@ impl Vm { /// Returns `Ok(())` on successful activation, or an `Err(HypervisorError)` if activation fails. 
pub fn activate_vmcs(&mut self) -> Result<(), HypervisorError> { trace!("Activating VMCS"); + self.vmcs_region.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; self.vmcs_region.revision_id.set_bit(31, false); // Clear the VMCS region. From 298d449a21d1f53ff3e33d90a6b4c40cc645c831 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:36:20 +1200 Subject: [PATCH 23/87] Delete vmx.rs --- hypervisor/src/intel/vmx.rs | 89 ------------------------------------- 1 file changed, 89 deletions(-) delete mode 100644 hypervisor/src/intel/vmx.rs diff --git a/hypervisor/src/intel/vmx.rs b/hypervisor/src/intel/vmx.rs deleted file mode 100644 index 885b00d..0000000 --- a/hypervisor/src/intel/vmx.rs +++ /dev/null @@ -1,89 +0,0 @@ -//! Enables VMXON region management for VMX operations. -//! -//! Offers the `Vmx` struct to facilitate the setup and activation of the VMXON region, -//! supporting hypervisor development by adhering to Intel's specifications for virtualization. - -use { - crate::{ - error::HypervisorError, - intel::{support::vmxon, vmxon::Vmxon}, - }, - bit_field::BitField, -}; - -/// Manages VMX operations, including the activation of the VMXON region. -/// -/// This struct is responsible for initializing and activating the VMXON region, which is essential -/// for enabling VMX (Virtual Machine Extensions) operations on the CPU. It includes functionalities -/// to set up the environment required for VMX operations by configuring system and model-specific -/// registers (MSRs) as per Intel's virtualization technology requirements. -pub struct Vmx { - pub vmxon_region: Vmxon, -} - -impl Vmx { - /// Creates a new instance of `Vmx`. - /// - /// Initializes the VMXON region with default settings to prepare the system for VMX operation activation. - /// - /// # Returns - /// - /// Returns a new instance of `Vmx`.. 
- pub fn new() -> Self { - Self { - vmxon_region: Vmxon::default(), - } - } - - /// Activates the VMXON region to enable VMX operation. - /// - /// Sets up the VMXON region and executes the VMXON instruction. This involves configuring control registers, - /// adjusting the IA32_FEATURE_CONTROL MSR, and validating the VMXON region's revision ID to ensure the CPU is ready - /// for VMX operation mode. - /// - /// # Returns - /// - /// Returns `Ok(())` on successful activation, or an `Err(HypervisorError)` if any step in the activation process fails. - pub fn activate_vmxon(&mut self) -> Result<(), HypervisorError> { - log::trace!("Setting up VMXON region"); - self.setup_vmxon()?; - log::trace!("VMXON region setup successfully!"); - - log::trace!("Executing VMXON instruction"); - vmxon(&mut self.vmxon_region as *const _ as _); - log::trace!("VMXON executed successfully!"); - - Ok(()) - } - - /// Prepares the system for VMX operation by configuring necessary control registers and MSRs. - /// - /// Ensures that the system meets all prerequisites for VMX operation as defined by Intel's specifications. - /// This includes enabling VMX operation through control register modifications, setting the lock bit in - /// IA32_FEATURE_CONTROL MSR, and adjusting mandatory CR0 and CR4 bits. - /// - /// # Returns - /// - /// Returns `Ok(())` if all configurations are successfully applied, or an `Err(HypervisorError)` if adjustments fail. 
- fn setup_vmxon(&mut self) -> Result<(), HypervisorError> { - log::trace!("Enabling Virtual Machine Extensions (VMX)"); - Vmxon::enable_vmx_operation(); - log::trace!("VMX enabled"); - - log::trace!("Adjusting IA32_FEATURE_CONTROL MSR"); - Vmxon::adjust_feature_control_msr()?; - log::trace!("IA32_FEATURE_CONTROL MSR adjusted"); - - log::trace!("Setting CR0 bits"); - Vmxon::set_cr0_bits(); - log::trace!("CR0 bits set"); - - log::trace!("Setting CR4 bits"); - Vmxon::set_cr4_bits(); - log::trace!("CR4 bits set"); - - self.vmxon_region.revision_id.set_bit(31, false); - - Ok(()) - } -} From 51ac92c02693aabb14f9ae1b2b352f8798da9541 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:36:22 +1200 Subject: [PATCH 24/87] Update vmxon.rs --- hypervisor/src/intel/vmxon.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/hypervisor/src/intel/vmxon.rs b/hypervisor/src/intel/vmxon.rs index 13e0e0d..6fc469f 100644 --- a/hypervisor/src/intel/vmxon.rs +++ b/hypervisor/src/intel/vmxon.rs @@ -4,7 +4,7 @@ //! It covers setting up the VMXON region, adjusting necessary control registers, and handling model-specific registers to meet Intel's virtualization requirements. use { - crate::{error::HypervisorError, intel::support::rdmsr}, + crate::error::HypervisorError, bitfield::BitMut, x86::{controlregs, current::paging::BASE_PAGE_SIZE, msr}, x86_64::registers::control::Cr4, @@ -26,18 +26,6 @@ pub struct Vmxon { pub data: [u8; BASE_PAGE_SIZE - 4], } -impl Default for Vmxon { - /// Constructs a default `Vmxon` instance. - /// - /// Sets the revision ID to the value read from the IA32_VMX_BASIC MSR and initializes the data array to zeros, preparing the VMXON region for use. - fn default() -> Self { - Self { - revision_id: rdmsr(msr::IA32_VMX_BASIC) as u32, - data: [0; BASE_PAGE_SIZE - 4], - } - } -} - impl Vmxon { /// Enables VMX operation by setting the VMX-enable bit in CR4. 
/// From 0719d805c66951a5cb0a7807a2a8e6fc63bc9c3f Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:36:26 +1200 Subject: [PATCH 25/87] Update vmm.rs --- hypervisor/src/vmm.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/hypervisor/src/vmm.rs b/hypervisor/src/vmm.rs index 4f2f617..5500b19 100644 --- a/hypervisor/src/vmm.rs +++ b/hypervisor/src/vmm.rs @@ -32,7 +32,6 @@ use { xsetbv::handle_xsetbv, ExitType, }, - vmx::Vmx, }, }, log::*, @@ -63,27 +62,27 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! { Err(e) => panic!("CPU is not supported: {:?}", e), }; - let mut vmx = Vmx::new(); - - match vmx.activate_vmxon() { - Ok(_) => debug!("VMX enabled"), - Err(e) => panic!("Failed to enable VMX: {:?}", e), - }; - let mut vm = match Vm::new(&guest_registers) { Ok(vm) => vm, Err(e) => panic!("Failed to create VM: {:?}", e), }; + match vm.activate_vmxon() { + Ok(_) => debug!("VMX enabled"), + Err(e) => panic!("Failed to enable VMX: {:?}", e), + } + match vm.activate_vmcs() { Ok(_) => debug!("VMCS activated"), Err(e) => panic!("Failed to activate VMCS: {:?}", e), } + /* match HookManager::hide_hypervisor_memory(&mut vm, AccessType::READ_WRITE_EXECUTE) { Ok(_) => debug!("Hypervisor memory hidden"), Err(e) => panic!("Failed to hide hypervisor memory: {:?}", e), }; + */ info!("Launching the VM until a vmexit occurs..."); From 755f10bd0a60ebd16b37531d10aca4c30b87b9cd Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 02:38:34 +1200 Subject: [PATCH 26/87] Update vm.rs --- hypervisor/src/intel/vm.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index a1e41ef..93e219f 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -27,11 +27,7 @@ use { alloc::boxed::Box, bit_field::BitField, log::*, - x86::{ - 
bits64::{paging::BASE_PAGE_SIZE, rflags::RFlags}, - msr, - vmx::vmcs, - }, + x86::{bits64::rflags::RFlags, msr, vmx::vmcs}, }; /// Represents a Virtual Machine (VM) instance, encapsulating its state and control mechanisms. @@ -158,7 +154,6 @@ impl Vm { pub fn activate_vmxon(&mut self) -> Result<(), HypervisorError> { trace!("Setting up VMXON region"); self.vmxon_region.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; - self.vmxon_region.data = [0; BASE_PAGE_SIZE - 4]; self.setup_vmxon()?; trace!("VMXON region setup successfully!"); From 64dc0c58cc2b09fe5d2440f1004d994ab7e4b5fc Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 03:07:22 +1200 Subject: [PATCH 27/87] Update vm.rs --- hypervisor/src/intel/vm.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index 93e219f..b854ed4 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -154,12 +154,13 @@ impl Vm { pub fn activate_vmxon(&mut self) -> Result<(), HypervisorError> { trace!("Setting up VMXON region"); self.vmxon_region.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; + self.vmxon_region.revision_id.set_bit(31, false); self.setup_vmxon()?; trace!("VMXON region setup successfully!"); trace!("Executing VMXON instruction"); - vmxon(&mut self.vmxon_region as *const _ as _); + vmxon(self.vmxon_region.as_ref() as *const _ as _); trace!("VMXON executed successfully!"); Ok(()) @@ -191,8 +192,6 @@ impl Vm { Vmxon::set_cr4_bits(); trace!("CR4 bits set"); - self.vmxon_region.revision_id.set_bit(31, false); - Ok(()) } From ac9acd575f58f96709f70769e07dbf6a830c4b6b Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 03:16:46 +1200 Subject: [PATCH 28/87] Update virtualize.rs --- uefi/src/virtualize.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs 
index 869d9b1..9998258 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -7,6 +7,7 @@ use { alloc::alloc::handle_alloc_error, core::{alloc::Layout, arch::global_asm}, hypervisor::{ + allocator::STACK_SIZE, intel::{capture::GuestRegisters, page::Page}, vmm::start_hypervisor, }, @@ -21,7 +22,7 @@ use { pub fn virtualize_system(guest_registers: &GuestRegisters) -> ! { debug!("Allocating stack space for host"); - let layout = Layout::array::(0x10).unwrap(); + let layout = Layout::array::(STACK_SIZE).unwrap(); let stack = unsafe { alloc::alloc::alloc_zeroed(layout) }; if stack.is_null() { handle_alloc_error(layout); From 361c1f572ae4a6fc7b06083cc38692cfaa1eb437 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 03:44:42 +1200 Subject: [PATCH 29/87] Update lib.rs --- hypervisor/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs index e632d3e..5c5cbc9 100644 --- a/hypervisor/src/lib.rs +++ b/hypervisor/src/lib.rs @@ -15,6 +15,7 @@ extern crate static_assertions; pub mod allocator; pub mod error; +pub mod global_const; pub mod intel; pub mod logger; pub mod vmm; From a2f3bb2f8a0115e56e25fe5e50011be6d5a6eb1c Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 03:44:44 +1200 Subject: [PATCH 30/87] Create global_const.rs --- hypervisor/src/global_const.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 hypervisor/src/global_const.rs diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs new file mode 100644 index 0000000..ff6ea2c --- /dev/null +++ b/hypervisor/src/global_const.rs @@ -0,0 +1,11 @@ +/// The size of the heap in bytes. +pub const HEAP_SIZE: usize = 0x5000000; // 80 MB + +/// The size of the stack in bytes. +pub const STACK_SIZE: usize = 0x3000; // 48 MB + +/// The maximum number of hooks supported by the hypervisor. 
Change this value as needed. +pub const MAX_HOOK_ENTRIES: usize = 64; + +/// The maximum number of hooks per page supported by the hypervisor. Change this value as needed. +pub const MAX_HOOKS_PER_PAGE: usize = 16; From 6b0c5ec5dcb31295959e849c1ce618d4f0d7aaf4 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 03:44:46 +1200 Subject: [PATCH 31/87] Update virtualize.rs --- uefi/src/virtualize.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs index 9998258..eff79d3 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -7,7 +7,7 @@ use { alloc::alloc::handle_alloc_error, core::{alloc::Layout, arch::global_asm}, hypervisor::{ - allocator::STACK_SIZE, + global_const::STACK_SIZE, intel::{capture::GuestRegisters, page::Page}, vmm::start_hypervisor, }, From 7a408ca6ae3a719b25a871a32110845adb9f4b81 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 03:44:49 +1200 Subject: [PATCH 32/87] Update memory_manager.rs --- hypervisor/src/intel/hooks/memory_manager.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/hypervisor/src/intel/hooks/memory_manager.rs b/hypervisor/src/intel/hooks/memory_manager.rs index f25a5d5..6404f4d 100644 --- a/hypervisor/src/intel/hooks/memory_manager.rs +++ b/hypervisor/src/intel/hooks/memory_manager.rs @@ -6,6 +6,7 @@ use { crate::{ allocator::box_zeroed, error::HypervisorError, + global_const::{MAX_HOOKS_PER_PAGE, MAX_HOOK_ENTRIES}, intel::{ept::Pt, hooks::hook_manager::EptHookType, page::Page}, }, alloc::boxed::Box, @@ -13,12 +14,6 @@ use { log::{error, trace}, }; -/// The maximum number of hooks supported by the hypervisor. Change this value as needed. -const MAX_HOOK_ENTRIES: usize = 64; - -/// The maximum number of hooks per page supported by the hypervisor. Change this value as needed. 
-const MAX_HOOKS_PER_PAGE: usize = 64; - /// Represents the hook information for a specific guest virtual address and EPT hook type. #[derive(Debug, Clone)] pub struct HookInfo { From 25aa0e6dd31003961f84d7ce069e47c60455d925 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 03:44:51 +1200 Subject: [PATCH 33/87] Update allocator.rs --- hypervisor/src/allocator.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 2b911b6..018f062 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -2,6 +2,7 @@ //! It tracks memory usage and ensures thread-safe operations. use { + crate::global_const::HEAP_SIZE, alloc::{ alloc::{alloc_zeroed, handle_alloc_error}, boxed::Box, @@ -18,9 +19,6 @@ use { }, }; -/// The size of the heap in bytes. -const HEAP_SIZE: usize = 0x800000; // 8MB - /// Reference to the system table, used to call the boot services pool memory /// allocation functions. 
static SYSTEM_TABLE: AtomicPtr = AtomicPtr::new(ptr::null_mut()); From b6208e07afccac1a2382a4ecdfadcec3e9886bf2 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 12:27:59 +1200 Subject: [PATCH 34/87] Update allocator.rs --- hypervisor/src/allocator.rs | 314 +++++++++++++++++++++++++++++++----- 1 file changed, 275 insertions(+), 39 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 018f062..e208f7f 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -10,9 +10,11 @@ use { core::{ alloc::{GlobalAlloc, Layout}, ffi::c_void, + mem::{align_of, size_of}, ptr, - sync::atomic::{AtomicPtr, AtomicU32, AtomicUsize, Ordering}, + sync::atomic::{AtomicPtr, AtomicU32, Ordering}, }, + log::trace, uefi::{ proto::loaded_image::LoadedImage, table::{boot::MemoryType, Boot, SystemTable}, @@ -28,12 +30,8 @@ static MEMORY_TYPE: AtomicU32 = AtomicU32::new(MemoryType::LOADER_DATA.0); /// A global allocator that uses UEFI's pool allocation functions and tracks memory usage. pub struct GlobalAllocator { - /// Atomic counter to track used memory. - used_memory: AtomicUsize, - /// Base address of the allocated heap. - heap_base_address: AtomicPtr, - /// Size of the allocated heap. - heap_size: usize, + /// Heap allocator instance + heap: ListHeap<{ HEAP_SIZE }>, } impl GlobalAllocator { @@ -43,11 +41,7 @@ impl GlobalAllocator { /// /// A new instance of `GlobalAllocator`. pub const fn new() -> Self { - Self { - used_memory: AtomicUsize::new(0), - heap_base_address: AtomicPtr::new(ptr::null_mut()), - heap_size: HEAP_SIZE, - } + Self { heap: ListHeap::new() } } /// Initializes the allocator and sets the system table. @@ -60,6 +54,8 @@ impl GlobalAllocator { /// /// * `system_table` - A reference to the UEFI system table. 
pub unsafe fn init(&self, system_table: &SystemTable) { + trace!("Initializing global allocator"); + // Store the system table pointer for later use in allocation and deallocation. SYSTEM_TABLE.store(system_table.as_ptr().cast_mut(), Ordering::Release); @@ -71,10 +67,10 @@ impl GlobalAllocator { // Allocate the initial heap pool and set the base address. let heap_base = boot_services - .allocate_pool(MemoryType::LOADER_DATA, self.heap_size) + .allocate_pool(MemoryType::LOADER_DATA, HEAP_SIZE) .expect("Failed to allocate heap pool"); - self.heap_base_address.store(heap_base, Ordering::Release); + self.heap.initialize(heap_base); } /// Returns the amount of memory currently in use. @@ -83,7 +79,7 @@ impl GlobalAllocator { /// /// The amount of memory currently in use, in bytes. pub fn used(&self) -> usize { - self.used_memory.load(Ordering::SeqCst) + unsafe { self.heap.used_memory() } } /// Returns the base address of the heap. @@ -92,7 +88,7 @@ impl GlobalAllocator { /// /// The base address of the heap. pub fn heap_base(&self) -> *mut u8 { - self.heap_base_address.load(Ordering::Acquire) + self.heap.base_address() } } @@ -111,41 +107,249 @@ unsafe impl GlobalAlloc for GlobalAllocator { /// /// A pointer to the allocated memory. 
unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let size = layout.size(); - let align = layout.align(); - log::debug!("Requested allocation: size = {:#x}, align = {:#x}", size, align); - - // Ensure the alignment and size fit within the heap bounds - let used = self.used(); - log::debug!("Current used memory: {:#x}", used); - let start = self.heap_base().add(used); - let aligned_start = start.add(start.align_offset(align)); - let end = aligned_start.add(size); - - if end > self.heap_base().add(self.heap_size) { - log::error!("Out of memory: requested end = {:#x}, heap end = {:#x}", end as usize, self.heap_base().add(self.heap_size) as usize); - return ptr::null_mut(); // Out of memory + trace!("Allocating memory: size = {:#x}, align = {:#x}", layout.size(), layout.align()); + self.heap.alloc(layout) + } + + /// Deallocates memory within the pre-allocated heap. + /// + /// # Arguments + /// + /// * `ptr` - A pointer to the memory to be deallocated. + /// * `layout` - The layout of the memory to be deallocated. + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + trace!("Deallocating memory: ptr = {:p}, size = {:#x}", ptr, layout.size()); + self.heap.dealloc(ptr, layout) + } + + /// Allocates zeroed memory from the pre-allocated heap. + /// + /// # Arguments + /// + /// * `layout` - The layout of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the allocated and zeroed memory. + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + trace!("Allocating zeroed memory: size = {:#x}, align = {:#x}", layout.size(), layout.align()); + self.heap.alloc_zeroed(layout) + } +} + +/// A heap allocator based on a linked list of free chunks. +#[repr(align(0x10))] +pub struct ListHeap(core::mem::MaybeUninit<[u8; SIZE]>); + +impl ListHeap { + /// Creates a new, uninitialized ListHeap. + /// + /// # Returns + /// + /// A new instance of `ListHeap`. 
+ pub const fn new() -> Self { + Self(core::mem::MaybeUninit::uninit()) + } + + /// Initializes the heap with the given base address. + /// + /// # Safety + /// + /// This function must be called exactly once before any allocations are made. + /// + /// # Arguments + /// + /// * `base_address` - The base address of the allocated heap. + pub unsafe fn initialize(&self, base_address: *mut u8) { + trace!("Initializing heap at base address: {:p}", base_address); + + let start = self.first_link(); + let last = self.last_link(base_address); + (&mut *start).size = 0; + (&mut *start).next = last; + (&mut *last).size = 0; + (&mut *last).next = last; + } + + /// Returns the first link in the heap. + /// + /// # Returns + /// + /// A pointer to the first link in the heap. + fn first_link(&self) -> *mut Link { + self.0.as_ptr() as *mut _ + } + + /// Returns the last link in the heap. + /// + /// # Arguments + /// + /// * `base_address` - The base address of the allocated heap. + /// + /// # Returns + /// + /// A pointer to the last link in the heap. + fn last_link(&self, base_address: *mut u8) -> *mut Link { + unsafe { (base_address as *const u8).add(SIZE).sub(Link::SIZE) as *mut _ } + } + + /// Returns the amount of memory currently in use. + /// + /// # Safety + /// + /// This function must be called from a safe context where memory is not concurrently modified. + /// + /// # Returns + /// + /// The amount of memory currently in use, in bytes. + pub unsafe fn used_memory(&self) -> usize { + let mut used = 0; + let mut link = self.first_link(); + while !(&*link).is_last() { + used += (&*link).size as usize; + link = (&*link).next; } + used + } + + /// Returns the base address of the heap. + /// + /// # Returns + /// + /// The base address of the heap. + pub fn base_address(&self) -> *mut u8 { + self.0.as_ptr() as *mut _ + } +} + +/// A structure representing a link in a linked list heap. +/// +/// This struct is used to manage free and allocated memory chunks in the heap. 
+/// Each link points to the next chunk and tracks the size of the current chunk. +#[repr(C, align(0x10))] +struct Link { + /// Pointer to the next link in the list. + next: *mut Link, + /// Size of the current chunk. + size: isize, +} - self.used_memory.fetch_add(end as usize - start as usize, Ordering::SeqCst); - log::debug!("Allocated memory: start = {:#x}, end = {:#x}", start as usize, end as usize); +impl Link { + const SIZE: usize = size_of::(); + const ALIGN: usize = align_of::(); - aligned_start + /// Gets the start of the buffer. + /// + /// # Returns + /// + /// The start position of the buffer. + pub fn position(&self) -> usize { + self as *const _ as usize + Link::SIZE } - /// Deallocates memory within the pre-allocated heap. + /// Checks if the link is the last in the list. + /// + /// # Returns + /// + /// `true` if the link is the last, `false` otherwise. + pub fn is_last(&self) -> bool { + self.next as *const _ == self + } + + /// Returns the maximum size available for allocation. + /// + /// # Returns + /// + /// The maximum size available for allocation. + pub fn max_size(&self) -> isize { + (self.next as usize - self.position()) as isize + } + + /// Returns the free space available for allocation. + /// + /// # Returns + /// + /// The free space available for allocation. + pub fn free_space(&self) -> isize { + self.max_size() - self.size + } + + /// Returns the start position of the free space. + /// + /// # Returns + /// + /// The start position of the free space. + pub fn free_space_start(&self) -> usize { + self.position() + self.size as usize + } +} + +unsafe impl GlobalAlloc for ListHeap { + /// Allocates memory from the linked list heap. + /// + /// # Arguments + /// + /// * `layout` - The layout of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the allocated memory. 
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let mut link = self.first_link(); + + let required_align = layout.align().max(Link::ALIGN); + let required_size = layout.size() as isize; + + while !(&*link).is_last() { + if (&*link).free_space() > required_size { + let effective_start = (&*link).free_space_start() + Link::SIZE; + let effective_size = (&*link).free_space() - Link::SIZE as isize; + + let mask = required_align - 1; + let aligned_pointer = (effective_start + mask) & !mask; + let aligned_size = effective_size - (aligned_pointer - effective_start) as isize; + + if required_size < aligned_size { + let new_link = (aligned_pointer - Link::SIZE) as *mut Link; + (&mut *new_link).next = (&mut *link).next; + (&mut *new_link).size = required_size; + (&mut *link).next = new_link; + + trace!("Allocated memory: ptr = {:p}, size = {:#x}", aligned_pointer as *mut u8, layout.size()); + return aligned_pointer as *mut _; + } + } + link = (&mut *link).next; + } + + ptr::null_mut() + } + + /// Deallocates memory within the linked list heap. /// /// # Arguments /// /// * `ptr` - A pointer to the memory to be deallocated. /// * `layout` - The layout of the memory to be deallocated. - unsafe fn dealloc(&self, _ptr: *mut u8, layout: Layout) { - // Note: In a simple bump allocator, deallocation is often a no-op. - // You might want to implement more complex free logic if needed. - self.used_memory.fetch_sub(layout.size(), Ordering::SeqCst); + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { + if ptr.is_null() { + return; + } + let link = &mut *(ptr.sub(size_of::()) as *mut Link); + + if link.is_last() { + return; + } + + let mut prev = self.first_link(); + while (&*prev).next != link { + prev = (&*prev).next; + } + + (&mut *prev).next = link.next; } - /// Allocates zeroed memory from the pre-allocated heap. + /// Allocates zeroed memory from the linked list heap. 
/// /// # Arguments /// @@ -161,6 +365,38 @@ unsafe impl GlobalAlloc for GlobalAllocator { } ptr } + + /// Reallocates memory within the linked list heap. + /// + /// # Arguments + /// + /// * `ptr` - A pointer to the memory to be reallocated. + /// * `layout` - The current layout of the memory. + /// * `new_size` - The new size of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the reallocated memory. + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + let link = &mut *(ptr.sub(size_of::()) as *mut Link); + + if link.max_size() > new_size as isize { + link.size = new_size as isize; + return ptr; + } + + let nlayout = Layout::from_size_align_unchecked(new_size, layout.align()); + let new_ptr = self.alloc(nlayout); + + if new_ptr.is_null() { + return new_ptr; + } + + ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); + self.dealloc(ptr, layout); + + new_ptr + } } /// Initializes the global heap allocator with the UEFI system table. From 3ca1a3ce5754b4e666670cc6d107d4ce020d33d3 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 12:59:51 +1200 Subject: [PATCH 35/87] Update allocator.rs --- hypervisor/src/allocator.rs | 383 ++++++++---------------------------- 1 file changed, 82 insertions(+), 301 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index e208f7f..b462cba 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -1,427 +1,208 @@ -//! This module provides a global allocator using UEFI's memory allocation functions. -//! It tracks memory usage and ensures thread-safe operations. 
- -use { - crate::global_const::HEAP_SIZE, - alloc::{ - alloc::{alloc_zeroed, handle_alloc_error}, - boxed::Box, - }, - core::{ - alloc::{GlobalAlloc, Layout}, - ffi::c_void, - mem::{align_of, size_of}, - ptr, - sync::atomic::{AtomicPtr, AtomicU32, Ordering}, - }, - log::trace, - uefi::{ - proto::loaded_image::LoadedImage, - table::{boot::MemoryType, Boot, SystemTable}, - }, -}; - -/// Reference to the system table, used to call the boot services pool memory -/// allocation functions. -static SYSTEM_TABLE: AtomicPtr = AtomicPtr::new(ptr::null_mut()); - -/// The memory type used for pool memory allocations. -static MEMORY_TYPE: AtomicU32 = AtomicU32::new(MemoryType::LOADER_DATA.0); - -/// A global allocator that uses UEFI's pool allocation functions and tracks memory usage. -pub struct GlobalAllocator { - /// Heap allocator instance - heap: ListHeap<{ HEAP_SIZE }>, -} - -impl GlobalAllocator { - /// Creates a new, uninitialized GlobalAllocator. - /// - /// # Returns - /// - /// A new instance of `GlobalAllocator`. - pub const fn new() -> Self { - Self { heap: ListHeap::new() } - } - - /// Initializes the allocator and sets the system table. - /// - /// # Safety - /// - /// This function must be called exactly once before any allocations are made. - /// - /// # Arguments - /// - /// * `system_table` - A reference to the UEFI system table. - pub unsafe fn init(&self, system_table: &SystemTable) { - trace!("Initializing global allocator"); - - // Store the system table pointer for later use in allocation and deallocation. - SYSTEM_TABLE.store(system_table.as_ptr().cast_mut(), Ordering::Release); - - // Set the memory type based on the loaded image data type. - let boot_services = system_table.boot_services(); - if let Ok(loaded_image) = boot_services.open_protocol_exclusive::(boot_services.image_handle()) { - MEMORY_TYPE.store(loaded_image.data_type().0, Ordering::Release); - } - - // Allocate the initial heap pool and set the base address. 
- let heap_base = boot_services - .allocate_pool(MemoryType::LOADER_DATA, HEAP_SIZE) - .expect("Failed to allocate heap pool"); +#![allow(unused)] - self.heap.initialize(heap_base); - } - - /// Returns the amount of memory currently in use. - /// - /// # Returns - /// - /// The amount of memory currently in use, in bytes. - pub fn used(&self) -> usize { - unsafe { self.heap.used_memory() } - } +use core::alloc::{GlobalAlloc, Layout}; +use log::debug; - /// Returns the base address of the heap. - /// - /// # Returns - /// - /// The base address of the heap. - pub fn heap_base(&self) -> *mut u8 { - self.heap.base_address() - } -} - -/// Global allocator instance. #[global_allocator] -pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator::new(); - -unsafe impl GlobalAlloc for GlobalAllocator { - /// Allocates memory from the pre-allocated heap. - /// - /// # Arguments - /// - /// * `layout` - The layout of the memory to be allocated. - /// - /// # Returns - /// - /// A pointer to the allocated memory. - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - trace!("Allocating memory: size = {:#x}, align = {:#x}", layout.size(), layout.align()); - self.heap.alloc(layout) - } - - /// Deallocates memory within the pre-allocated heap. - /// - /// # Arguments - /// - /// * `ptr` - A pointer to the memory to be deallocated. - /// * `layout` - The layout of the memory to be deallocated. - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - trace!("Deallocating memory: ptr = {:p}, size = {:#x}", ptr, layout.size()); - self.heap.dealloc(ptr, layout) - } +pub static mut HEAP: ListHeap<0x180000> = ListHeap::new(); - /// Allocates zeroed memory from the pre-allocated heap. - /// - /// # Arguments - /// - /// * `layout` - The layout of the memory to be allocated. - /// - /// # Returns - /// - /// A pointer to the allocated and zeroed memory. 
- unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - trace!("Allocating zeroed memory: size = {:#x}, align = {:#x}", layout.size(), layout.align()); - self.heap.alloc_zeroed(layout) - } -} - -/// A heap allocator based on a linked list of free chunks. #[repr(align(0x10))] pub struct ListHeap(core::mem::MaybeUninit<[u8; SIZE]>); impl ListHeap { - /// Creates a new, uninitialized ListHeap. - /// - /// # Returns - /// - /// A new instance of `ListHeap`. pub const fn new() -> Self { Self(core::mem::MaybeUninit::uninit()) } - /// Initializes the heap with the given base address. - /// - /// # Safety - /// - /// This function must be called exactly once before any allocations are made. - /// - /// # Arguments - /// - /// * `base_address` - The base address of the allocated heap. - pub unsafe fn initialize(&self, base_address: *mut u8) { - trace!("Initializing heap at base address: {:p}", base_address); + pub fn as_slice(&self) -> &[u8] { + unsafe { &self.0.assume_init_ref()[..] } + } + /// resets the heap to its default state, this MUST be called at the start + pub unsafe fn reset(&mut self) { let start = self.first_link(); - let last = self.last_link(base_address); + let last = self.last_link(); (&mut *start).size = 0; (&mut *start).next = last; (&mut *last).size = 0; (&mut *last).next = last; } - /// Returns the first link in the heap. - /// - /// # Returns - /// - /// A pointer to the first link in the heap. fn first_link(&self) -> *mut Link { self.0.as_ptr() as *mut _ } - /// Returns the last link in the heap. - /// - /// # Arguments - /// - /// * `base_address` - The base address of the allocated heap. - /// - /// # Returns - /// - /// A pointer to the last link in the heap. 
- fn last_link(&self, base_address: *mut u8) -> *mut Link { - unsafe { (base_address as *const u8).add(SIZE).sub(Link::SIZE) as *mut _ } + fn last_link(&self) -> *mut Link { + unsafe { (self.0.as_ptr() as *const u8).add(SIZE).sub(Link::SIZE) as *mut _ } } - /// Returns the amount of memory currently in use. - /// - /// # Safety - /// - /// This function must be called from a safe context where memory is not concurrently modified. - /// - /// # Returns - /// - /// The amount of memory currently in use, in bytes. - pub unsafe fn used_memory(&self) -> usize { - let mut used = 0; - let mut link = self.first_link(); - while !(&*link).is_last() { - used += (&*link).size as usize; - link = (&*link).next; - } - used - } + pub fn _debug(&self) { + unsafe { + let mut total_freespace = 0usize; + let mut total_allocations = 0usize; + let mut total_allocation_size = 0usize; - /// Returns the base address of the heap. - /// - /// # Returns - /// - /// The base address of the heap. - pub fn base_address(&self) -> *mut u8 { - self.0.as_ptr() as *mut _ + let mut max_freespace = 0usize; + let mut largest_allocation = 0usize; + + let mut link = self.first_link(); + while (*link).next != link { + let free = (&*link).free_space() as usize; + let used = (&*link).size as usize; + + total_allocations += 1; + total_allocation_size += used; + total_freespace += free; + max_freespace = max_freespace.max(free); + largest_allocation = largest_allocation.max(used); + + link = (*link).next; + } + + // skip the first link + total_allocations -= 1; + + let wasted = (total_allocations + 2) * Link::SIZE; + debug!("Total Heap Size: 0x{:X}", SIZE); + debug!("Space wasted on memory management: 0x{wasted:X} bytes"); + debug!("Total memory allocated: 0x{total_allocation_size:X} bytes"); + debug!("Total memory available: 0x{total_freespace:X} bytes"); + debug!("Largest allocated buffer: 0x{largest_allocation:X} bytes"); + debug!("Largest available buffer: 0x{max_freespace:X} bytes"); + debug!("Total 
allocation count: 0x{total_allocations:X}"); + } } } -/// A structure representing a link in a linked list heap. -/// -/// This struct is used to manage free and allocated memory chunks in the heap. -/// Each link points to the next chunk and tracks the size of the current chunk. #[repr(C, align(0x10))] struct Link { - /// Pointer to the next link in the list. next: *mut Link, - /// Size of the current chunk. size: isize, } impl Link { - const SIZE: usize = size_of::(); - const ALIGN: usize = align_of::(); - - /// Gets the start of the buffer. - /// - /// # Returns - /// - /// The start position of the buffer. + const SIZE: usize = core::mem::size_of::(); + const ALIGN: usize = core::mem::align_of::(); + + // gets the start of the buffer pub fn position(&self) -> usize { self as *const _ as usize + Link::SIZE } - /// Checks if the link is the last in the list. - /// - /// # Returns - /// - /// `true` if the link is the last, `false` otherwise. pub fn is_last(&self) -> bool { self.next as *const _ == self } - /// Returns the maximum size available for allocation. - /// - /// # Returns - /// - /// The maximum size available for allocation. pub fn max_size(&self) -> isize { (self.next as usize - self.position()) as isize } - /// Returns the free space available for allocation. - /// - /// # Returns - /// - /// The free space available for allocation. pub fn free_space(&self) -> isize { self.max_size() - self.size } - /// Returns the start position of the free space. - /// - /// # Returns - /// - /// The start position of the free space. pub fn free_space_start(&self) -> usize { self.position() + self.size as usize } } unsafe impl GlobalAlloc for ListHeap { - /// Allocates memory from the linked list heap. - /// - /// # Arguments - /// - /// * `layout` - The layout of the memory to be allocated. - /// - /// # Returns - /// - /// A pointer to the allocated memory. 
unsafe fn alloc(&self, layout: Layout) -> *mut u8 { let mut link = self.first_link(); + // the required alignment and size for this type + // we don't support alignments less than 0x10 because of the Link let required_align = layout.align().max(Link::ALIGN); let required_size = layout.size() as isize; while !(&*link).is_last() { + if ((*link).next as usize) < (&*link).position() { + debug!("Last: {:p}", self.last_link()); + debug!("link: {:p}", link); + debug!("next: {:p}", (*link).next); + debug!("size: 0x{:x}", (*link).size); + } + if (&*link).free_space() > required_size { + // the effective size and start address after we account for our link let effective_start = (&*link).free_space_start() + Link::SIZE; let effective_size = (&*link).free_space() - Link::SIZE as isize; + // align the pointer, and adjust the size to account for the bytes we lost let mask = required_align - 1; let aligned_pointer = (effective_start + mask) & !mask; let aligned_size = effective_size - (aligned_pointer - effective_start) as isize; + // if the required size is less than the effect size after alignment... use it if required_size < aligned_size { let new_link = (aligned_pointer - Link::SIZE) as *mut Link; (&mut *new_link).next = (&mut *link).next; (&mut *new_link).size = required_size; (&mut *link).next = new_link; - trace!("Allocated memory: ptr = {:p}, size = {:#x}", aligned_pointer as *mut u8, layout.size()); return aligned_pointer as *mut _; } } + + // not enough room, keep looking link = (&mut *link).next; } - ptr::null_mut() + self._debug(); + // no free memory for this allocation :( + 0 as *mut _ } - /// Deallocates memory within the linked list heap. - /// - /// # Arguments - /// - /// * `ptr` - A pointer to the memory to be deallocated. - /// * `layout` - The layout of the memory to be deallocated. 
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { if ptr.is_null() { return; } - let link = &mut *(ptr.sub(size_of::()) as *mut Link); + let link = &mut *(ptr.sub(core::mem::size_of::()) as *mut Link); + // sanity check, don't de-alloc the last link if link.is_last() { return; } + // find the previous link let mut prev = self.first_link(); while (&*prev).next != link { - prev = (&*prev).next; + prev = (&*prev).next } + // remove the link from the list, and its de-allocated (&mut *prev).next = link.next; } - /// Allocates zeroed memory from the linked list heap. - /// - /// # Arguments - /// - /// * `layout` - The layout of the memory to be allocated. - /// - /// # Returns - /// - /// A pointer to the allocated and zeroed memory. - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - let ptr = self.alloc(layout); - if !ptr.is_null() { - ptr::write_bytes(ptr, 0, layout.size()); - } - ptr - } - - /// Reallocates memory within the linked list heap. - /// - /// # Arguments - /// - /// * `ptr` - A pointer to the memory to be reallocated. - /// * `layout` - The current layout of the memory. - /// * `new_size` - The new size of the memory to be allocated. - /// - /// # Returns - /// - /// A pointer to the reallocated memory. 
+ /// Tries to grow the current allocator if it can, + /// if not just re-allocates and copies the buffer to the new allocation unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - let link = &mut *(ptr.sub(size_of::()) as *mut Link); + let link = &mut *(ptr.sub(core::mem::size_of::()) as *mut Link); + // just resize the buffer if link.max_size() > new_size as isize { link.size = new_size as isize; return ptr; } + // construct the new layout and try to allocate it let nlayout = Layout::from_size_align_unchecked(new_size, layout.align()); let new_ptr = self.alloc(nlayout); + // failed to alloc a new buffer, don't alter original data and abort if new_ptr.is_null() { return new_ptr; } - ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); + // copy data to the new array + core::ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); + self.dealloc(ptr, layout); new_ptr } } -/// Initializes the global heap allocator with the UEFI system table. -/// -/// This function must be called before any memory allocation operations are performed. -/// -/// # Safety -/// -/// This function is unsafe because it must be called exactly once and must be called -/// before any allocations are made. -/// -/// # Arguments -/// -/// * `system_table` - A reference to the UEFI system table. -pub unsafe fn init_heap(system_table: &SystemTable) { - GLOBAL_ALLOCATOR.init(system_table); -} - -/// Notifies the allocator library that boot services are no longer available. -/// -/// This function must be called before exiting UEFI boot services. -pub fn exit_boot_services() { - SYSTEM_TABLE.store(ptr::null_mut(), Ordering::Release); -} - /// Allocates and zeros memory for a given type, returning a boxed instance. 
/// /// # Safety @@ -443,4 +224,4 @@ pub unsafe fn box_zeroed() -> Box { handle_alloc_error(layout); } unsafe { Box::from_raw(ptr) } -} +} \ No newline at end of file From a94ce6b6900f7626792ab48c83eefb3f7b916718 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:02:55 +1200 Subject: [PATCH 36/87] Update allocator.rs --- hypervisor/src/allocator.rs | 153 +++++++++++++++++++++++++++++------- 1 file changed, 124 insertions(+), 29 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index b462cba..02880e5 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -1,24 +1,53 @@ -#![allow(unused)] - -use core::alloc::{GlobalAlloc, Layout}; -use log::debug; - +//! This module provides a global allocator using a linked list heap allocation strategy. +//! The allocator is initialized with a fixed-size memory pool and supports memory allocation, +//! deallocation, and reallocation operations. The allocator tracks memory usage and provides +//! debugging information. + +use { + alloc::{ + alloc::{alloc_zeroed, handle_alloc_error}, + boxed::Box, + }, + core::alloc::{GlobalAlloc, Layout}, + log::debug, +}; + +/// Global allocator instance with a heap size of 1.5MB #[global_allocator] pub static mut HEAP: ListHeap<0x180000> = ListHeap::new(); +/// A heap allocator based on a linked list of free chunks. +/// +/// This struct manages a heap of a fixed size using a linked list +/// of free chunks. It supports memory allocation, deallocation, and +/// reallocation. #[repr(align(0x10))] pub struct ListHeap(core::mem::MaybeUninit<[u8; SIZE]>); impl ListHeap { + /// Creates a new, uninitialized ListHeap. + /// + /// # Returns + /// + /// A new instance of `ListHeap`. pub const fn new() -> Self { Self(core::mem::MaybeUninit::uninit()) } + /// Returns the heap as a slice. + /// + /// # Returns + /// + /// A slice representing the heap. 
pub fn as_slice(&self) -> &[u8] { unsafe { &self.0.assume_init_ref()[..] } } - /// resets the heap to its default state, this MUST be called at the start + /// Resets the heap to its default state. This must be called at the start. + /// + /// # Safety + /// + /// This function is unsafe because it must be called exactly once before any allocations are made. pub unsafe fn reset(&mut self) { let start = self.first_link(); let last = self.last_link(); @@ -28,14 +57,25 @@ impl ListHeap { (&mut *last).next = last; } + /// Returns the first link in the heap. + /// + /// # Returns + /// + /// A pointer to the first link in the heap. fn first_link(&self) -> *mut Link { self.0.as_ptr() as *mut _ } + /// Returns the last link in the heap. + /// + /// # Returns + /// + /// A pointer to the last link in the heap. fn last_link(&self) -> *mut Link { unsafe { (self.0.as_ptr() as *const u8).add(SIZE).sub(Link::SIZE) as *mut _ } } + /// Debugging function to print the current state of the heap. pub fn _debug(&self) { unsafe { let mut total_freespace = 0usize; @@ -74,44 +114,83 @@ impl ListHeap { } } +/// A structure representing a link in a linked list heap. +/// +/// This struct is used to manage free and allocated memory chunks in the heap. +/// Each link points to the next chunk and tracks the size of the current chunk. #[repr(C, align(0x10))] struct Link { + /// Pointer to the next link in the list. next: *mut Link, + /// Size of the current chunk. size: isize, } impl Link { - const SIZE: usize = core::mem::size_of::(); - const ALIGN: usize = core::mem::align_of::(); - - // gets the start of the buffer + const SIZE: usize = size_of::(); + const ALIGN: usize = align_of::(); + + /// Gets the start of the buffer. + /// + /// # Returns + /// + /// The start position of the buffer. pub fn position(&self) -> usize { self as *const _ as usize + Link::SIZE } + /// Checks if the link is the last in the list. 
+ /// + /// # Returns + /// + /// `true` if the link is the last, `false` otherwise. pub fn is_last(&self) -> bool { self.next as *const _ == self } + /// Returns the maximum size available for allocation. + /// + /// # Returns + /// + /// The maximum size available for allocation. pub fn max_size(&self) -> isize { (self.next as usize - self.position()) as isize } + /// Returns the free space available for allocation. + /// + /// # Returns + /// + /// The free space available for allocation. pub fn free_space(&self) -> isize { self.max_size() - self.size } + /// Returns the start position of the free space. + /// + /// # Returns + /// + /// The start position of the free space. pub fn free_space_start(&self) -> usize { self.position() + self.size as usize } } unsafe impl GlobalAlloc for ListHeap { + /// Allocates memory from the linked list heap. + /// + /// # Arguments + /// + /// * `layout` - The layout of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the allocated memory. 
unsafe fn alloc(&self, layout: Layout) -> *mut u8 { let mut link = self.first_link(); - // the required alignment and size for this type - // we don't support alignments less than 0x10 because of the Link + // The required alignment and size for this type + // We don't support alignments less than 0x10 because of the Link let required_align = layout.align().max(Link::ALIGN); let required_size = layout.size() as isize; @@ -124,16 +203,16 @@ unsafe impl GlobalAlloc for ListHeap { } if (&*link).free_space() > required_size { - // the effective size and start address after we account for our link + // The effective size and start address after we account for our link let effective_start = (&*link).free_space_start() + Link::SIZE; let effective_size = (&*link).free_space() - Link::SIZE as isize; - // align the pointer, and adjust the size to account for the bytes we lost + // Align the pointer, and adjust the size to account for the bytes we lost let mask = required_align - 1; let aligned_pointer = (effective_start + mask) & !mask; let aligned_size = effective_size - (aligned_pointer - effective_start) as isize; - // if the required size is less than the effect size after alignment... use it + // If the required size is less than the effective size after alignment, use it if required_size < aligned_size { let new_link = (aligned_pointer - Link::SIZE) as *mut Link; (&mut *new_link).next = (&mut *link).next; @@ -144,57 +223,73 @@ unsafe impl GlobalAlloc for ListHeap { } } - // not enough room, keep looking + // Not enough room, keep looking link = (&mut *link).next; } self._debug(); - // no free memory for this allocation :( + // No free memory for this allocation 0 as *mut _ } + /// Deallocates memory within the linked list heap. + /// + /// # Arguments + /// + /// * `ptr` - A pointer to the memory to be deallocated. + /// * `layout` - The layout of the memory to be deallocated. 
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { if ptr.is_null() { return; } - let link = &mut *(ptr.sub(core::mem::size_of::()) as *mut Link); + let link = &mut *(ptr.sub(size_of::()) as *mut Link); - // sanity check, don't de-alloc the last link + // Sanity check, don't deallocate the last link if link.is_last() { return; } - // find the previous link + // Find the previous link let mut prev = self.first_link(); while (&*prev).next != link { - prev = (&*prev).next + prev = (&*prev).next; } - // remove the link from the list, and its de-allocated + // Remove the link from the list, and it's deallocated (&mut *prev).next = link.next; } /// Tries to grow the current allocator if it can, - /// if not just re-allocates and copies the buffer to the new allocation + /// if not just reallocates and copies the buffer to the new allocation. + /// + /// # Arguments + /// + /// * `ptr` - A pointer to the memory to be reallocated. + /// * `layout` - The current layout of the memory. + /// * `new_size` - The new size of the memory to be allocated. + /// + /// # Returns + /// + /// A pointer to the reallocated memory. 
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - let link = &mut *(ptr.sub(core::mem::size_of::()) as *mut Link); + let link = &mut *(ptr.sub(size_of::()) as *mut Link); - // just resize the buffer + // Just resize the buffer if link.max_size() > new_size as isize { link.size = new_size as isize; return ptr; } - // construct the new layout and try to allocate it + // Construct the new layout and try to allocate it let nlayout = Layout::from_size_align_unchecked(new_size, layout.align()); let new_ptr = self.alloc(nlayout); - // failed to alloc a new buffer, don't alter original data and abort + // Failed to allocate a new buffer, don't alter original data and abort if new_ptr.is_null() { return new_ptr; } - // copy data to the new array + // Copy data to the new array core::ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); self.dealloc(ptr, layout); @@ -224,4 +319,4 @@ pub unsafe fn box_zeroed() -> Box { handle_alloc_error(layout); } unsafe { Box::from_raw(ptr) } -} \ No newline at end of file +} From a271aa714b4c8447a4080b93b09d518a8e478976 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:08:13 +1200 Subject: [PATCH 37/87] Update allocator.rs --- hypervisor/src/allocator.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 02880e5..e833303 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -4,17 +4,15 @@ //! debugging information. use { - alloc::{ - alloc::{alloc_zeroed, handle_alloc_error}, - boxed::Box, - }, + crate::global_const::HEAP_SIZE, + alloc::boxed::Box, core::alloc::{GlobalAlloc, Layout}, log::debug, }; -/// Global allocator instance with a heap size of 1.5MB +/// Global allocator instance with a heap size of `HEAP_SIZE`. 
#[global_allocator] -pub static mut HEAP: ListHeap<0x180000> = ListHeap::new(); +pub static mut HEAP: ListHeap = ListHeap::new(); /// A heap allocator based on a linked list of free chunks. /// @@ -99,7 +97,7 @@ impl ListHeap { link = (*link).next; } - // skip the first link + // Skip the first link total_allocations -= 1; let wasted = (total_allocations + 2) * Link::SIZE; @@ -313,10 +311,5 @@ unsafe impl GlobalAlloc for ListHeap { /// /// Panics if memory allocation fails. pub unsafe fn box_zeroed() -> Box { - let layout = Layout::new::(); - let ptr = unsafe { alloc_zeroed(layout) }.cast::(); - if ptr.is_null() { - handle_alloc_error(layout); - } - unsafe { Box::from_raw(ptr) } + unsafe { Box::::new_zeroed().assume_init() } } From fb96e930486b96456bfb751a0de2439749609e57 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:17:23 +1200 Subject: [PATCH 38/87] Update Cargo.toml --- hypervisor/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/Cargo.toml b/hypervisor/Cargo.toml index 9fed21e..c003208 100644 --- a/hypervisor/Cargo.toml +++ b/hypervisor/Cargo.toml @@ -13,7 +13,7 @@ path = "src/lib.rs" x86 = "0.52.0" # https://crates.io/crates/x86 x86_64 = "0.15.0" # https://crates.io/crates/x86_64 uefi = { version = "0.28.0", features = ["alloc"] } # https://crates.io/crates/uefi -#uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services +uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services thiserror-no-std = "2.0.2" # https://crates.io/crates/thiserror-no-std bitfield = "0.15.0" # https://crates.io/crates/bitfield bit_field = "0.10.2" # https://crates.io/crates/bit_field From dafa12a9abb20e7b9ecdf96350939be9e2c30c0d Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:17:25 +1200 Subject: [PATCH 39/87] Update Cargo.toml 
--- uefi/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uefi/Cargo.toml b/uefi/Cargo.toml index d455d71..6ef1820 100644 --- a/uefi/Cargo.toml +++ b/uefi/Cargo.toml @@ -11,7 +11,7 @@ path = "src/main.rs" [dependencies] uefi = { version = "0.28.0", features = ["alloc"] } # https://crates.io/crates/uefi -#uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services +uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services log = { version = "0.4.20", default-features = false } # https://crates.io/crates/log once_cell = "1.19.0" # https://crates.io/crates/once_cell spin = "0.9" # https://crates.io/crates/spin From 892bb92091a00840af1d3103454576a79e9ed476 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:32:11 +1200 Subject: [PATCH 40/87] Update allocator.rs --- hypervisor/src/allocator.rs | 59 +++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 3 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index e833303..3f694e6 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -4,10 +4,16 @@ //! debugging information. use { - crate::global_const::HEAP_SIZE, - alloc::boxed::Box, - core::alloc::{GlobalAlloc, Layout}, + crate::global_const::{HEAP_SIZE, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES}, + alloc::{boxed::Box, vec::Vec}, + core::{ + alloc::{GlobalAlloc, Layout}, + sync::atomic::{AtomicUsize, Ordering}, + }, log::debug, + spin::Mutex, + uefi::{prelude::BootServices, table::boot::AllocateType}, + x86::bits64::paging::BASE_PAGE_SIZE, }; /// Global allocator instance with a heap size of `HEAP_SIZE`. @@ -313,3 +319,50 @@ unsafe impl GlobalAlloc for ListHeap { pub unsafe fn box_zeroed() -> Box { unsafe { Box::::new_zeroed().assume_init() } } + +/// Allocates a block of memory pages using UEFI's allocate_pages function. 
+/// +/// This function allocates memory pages that are not part of the global allocator. +/// The allocated memory is of type `RUNTIME_SERVICES_DATA` and is allocated anywhere in memory. +/// +/// # Arguments +/// +/// * `boot_services` - A reference to the UEFI boot services table. +/// +/// # Returns +/// +/// A pointer to the allocated memory block. +/// +/// # Panics +/// +/// This function will panic if memory allocation fails. +pub fn allocate_uefi_pages(boot_services: &BootServices) -> *mut u8 { + // Allocate the pages using UEFI's allocate_pages function + let allocated_pages = boot_services + .allocate_pages(AllocateType::AnyPages, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES) + .expect("Failed to allocate UEFI pages"); + + // Record the allocation + record_allocation(allocated_pages as usize, STACK_NUMBER_OF_PAGES * BASE_PAGE_SIZE); // Assuming 4KB pages + + // Return the pointer to the allocated memory block + allocated_pages as *mut u8 +} + +// Structure to store allocated memory ranges +#[derive(Debug)] +struct MemoryRange { + start: usize, + size: usize, +} + +// Global list to store allocated memory ranges +static ALLOCATED_MEMORY: Mutex> = Mutex::new(Vec::new()); +static TOTAL_ALLOCATED_MEMORY: AtomicUsize = AtomicUsize::new(0); + +// Function to record an allocation +fn record_allocation(start: usize, size: usize) { + let mut allocated_memory = ALLOCATED_MEMORY.lock(); + allocated_memory.push(MemoryRange { start, size }); + TOTAL_ALLOCATED_MEMORY.fetch_add(size, Ordering::SeqCst); +} From 33f84e75609eb3ce8d009ee7778bf334f0f45ed8 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:32:13 +1200 Subject: [PATCH 41/87] Update global_const.rs --- hypervisor/src/global_const.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index ff6ea2c..841ad42 100644 --- a/hypervisor/src/global_const.rs +++ 
b/hypervisor/src/global_const.rs @@ -1,8 +1,13 @@ +use uefi::table::boot::MemoryType; + /// The size of the heap in bytes. -pub const HEAP_SIZE: usize = 0x5000000; // 80 MB +pub const HEAP_SIZE: usize = 0x180000; /// The size of the stack in bytes. -pub const STACK_SIZE: usize = 0x3000; // 48 MB +pub const STACK_NUMBER_OF_PAGES: usize = 0x20; + +/// The memory type for the stack allocated pages +pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; /// The maximum number of hooks supported by the hypervisor. Change this value as needed. pub const MAX_HOOK_ENTRIES: usize = 64; From e0b24d81b56f74a88ad197517e1e01060835e8ea Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:43:50 +1200 Subject: [PATCH 42/87] Update allocator.rs --- hypervisor/src/allocator.rs | 48 ++++++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 3f694e6..601f214 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -8,11 +8,12 @@ use { alloc::{boxed::Box, vec::Vec}, core::{ alloc::{GlobalAlloc, Layout}, - sync::atomic::{AtomicUsize, Ordering}, + ptr, + sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, }, log::debug, spin::Mutex, - uefi::{prelude::BootServices, table::boot::AllocateType}, + uefi::table::{boot::AllocateType, Boot, SystemTable}, x86::bits64::paging::BASE_PAGE_SIZE, }; @@ -294,7 +295,7 @@ unsafe impl GlobalAlloc for ListHeap { } // Copy data to the new array - core::ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); + ptr::copy_nonoverlapping(ptr, new_ptr, layout.size()); self.dealloc(ptr, layout); @@ -320,14 +321,30 @@ pub unsafe fn box_zeroed() -> Box { unsafe { Box::::new_zeroed().assume_init() } } -/// Allocates a block of memory pages using UEFI's allocate_pages function. 
+/// Reference to the system table, used to call the boot services pool memory +/// allocation functions. +static SYSTEM_TABLE: AtomicPtr> = AtomicPtr::new(ptr::null_mut()); + +/// Initializes the system table. /// -/// This function allocates memory pages that are not part of the global allocator. -/// The allocated memory is of type `RUNTIME_SERVICES_DATA` and is allocated anywhere in memory. +/// This function must be called before any memory allocation operations are performed. +/// +/// # Safety +/// +/// This function is unsafe because it must be called exactly once and must be called +/// before any allocations are made. /// /// # Arguments /// -/// * `boot_services` - A reference to the UEFI boot services table. +/// * `system_table` - A reference to the UEFI system table. +pub unsafe fn init_system_table(system_table: &SystemTable) { + SYSTEM_TABLE.store(system_table as *const _ as *mut _, Ordering::Release); +} + +/// Allocates a block of memory pages using UEFI's allocate_pages function. +/// +/// This function allocates memory pages that are not part of the global allocator. +/// The allocated memory is of type `RUNTIME_SERVICES_DATA` and is allocated anywhere in memory. /// /// # Returns /// @@ -336,7 +353,11 @@ pub unsafe fn box_zeroed() -> Box { /// # Panics /// /// This function will panic if memory allocation fails. 
-pub fn allocate_uefi_pages(boot_services: &BootServices) -> *mut u8 { +pub fn allocate_host_stack() -> *mut u8 { + // Get the system table and boot services + let system_table = SYSTEM_TABLE.load(Ordering::Acquire); + let boot_services = unsafe { &(*system_table).boot_services() }; + // Allocate the pages using UEFI's allocate_pages function let allocated_pages = boot_services .allocate_pages(AllocateType::AnyPages, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES) @@ -361,8 +382,17 @@ static ALLOCATED_MEMORY: Mutex> = Mutex::new(Vec::new()); static TOTAL_ALLOCATED_MEMORY: AtomicUsize = AtomicUsize::new(0); // Function to record an allocation -fn record_allocation(start: usize, size: usize) { +pub fn record_allocation(start: usize, size: usize) { let mut allocated_memory = ALLOCATED_MEMORY.lock(); allocated_memory.push(MemoryRange { start, size }); TOTAL_ALLOCATED_MEMORY.fetch_add(size, Ordering::SeqCst); } + +/// Prints the tracked memory allocations. +pub fn print_tracked_allocations() { + let allocated_memory = ALLOCATED_MEMORY.lock(); + for range in allocated_memory.iter() { + debug!("Allocated memory range: start = {:#x}, size = {:#x}", range.start, range.size); + } + debug!("Total allocated memory: {:#x} bytes", TOTAL_ALLOCATED_MEMORY.load(Ordering::SeqCst)); +} From b0367dfb517f16503c1174b3933597356f3a9c35 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:45:56 +1200 Subject: [PATCH 43/87] Update relocation.rs --- uefi/src/relocation.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/uefi/src/relocation.rs b/uefi/src/relocation.rs index bb34375..6f2e489 100644 --- a/uefi/src/relocation.rs +++ b/uefi/src/relocation.rs @@ -4,6 +4,7 @@ //! 
Credits Satoshi Tanda: https://github.com/tandasat/Hello-VT-rp/blob/main/hypervisor/src/switch_stack.rs use { + hypervisor::allocator::record_allocation, log::debug, uefi::{prelude::BootServices, proto::loaded_image::LoadedImage}, }; @@ -29,6 +30,7 @@ pub fn zap_relocations(boot_service: &BootServices) -> uefi::Result<()> { let (image_base, image_size) = loaded_image.info(); let image_base = image_base as usize; let image_range = image_base..image_base + image_size as usize; + record_allocation(image_base, image_size as usize); // Log the image base address range for debugging purposes. debug!("Image base: {:#x?}", image_range); From d7f2d3afc3318fdb3372cd944c79e6507d8f5356 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:46:00 +1200 Subject: [PATCH 44/87] Update virtualize.rs --- uefi/src/virtualize.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs index eff79d3..18ed4c1 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -7,7 +7,8 @@ use { alloc::alloc::handle_alloc_error, core::{alloc::Layout, arch::global_asm}, hypervisor::{ - global_const::STACK_SIZE, + allocator::allocate_host_stack, + global_const::STACK_NUMBER_OF_PAGES, intel::{capture::GuestRegisters, page::Page}, vmm::start_hypervisor, }, @@ -22,15 +23,10 @@ use { pub fn virtualize_system(guest_registers: &GuestRegisters) -> ! 
{ debug!("Allocating stack space for host"); - let layout = Layout::array::(STACK_SIZE).unwrap(); - let stack = unsafe { alloc::alloc::alloc_zeroed(layout) }; - if stack.is_null() { - handle_alloc_error(layout); - } - let host_stack = stack as u64 + layout.size() as u64 - 0x10; - debug!("Stack range: {:#x?}", stack as u64..host_stack); + let host_stack = allocate_host_stack() as usize; + debug!("Stack range: {:#x?}", host_stack..STACK_NUMBER_OF_PAGES); - unsafe { switch_stack(guest_registers, start_hypervisor as usize, host_stack) }; + unsafe { switch_stack(guest_registers, start_hypervisor as usize, host_stack as _) }; } extern "efiapi" { From b69a68ec20ab0c0d16539a482d75afaff32f6780 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:46:08 +1200 Subject: [PATCH 45/87] Update main.rs --- uefi/src/main.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/uefi/src/main.rs b/uefi/src/main.rs index 7c6f405..7e4d4a6 100644 --- a/uefi/src/main.rs +++ b/uefi/src/main.rs @@ -12,7 +12,7 @@ extern crate alloc; use { crate::{processor::start_hypervisor_on_all_processors, relocation::zap_relocations}, hypervisor::{ - allocator::init_heap, + allocator::init_system_table, logger::{self, SerialPort}, }, log::*, @@ -57,12 +57,8 @@ fn panic_handler(info: &core::panic::PanicInfo) -> ! { /// or `Status::ABORTED` if the hypervisor fails to install. #[entry] fn main(_image_handle: Handle, system_table: SystemTable) -> Status { - // Initialize the allocator BEFORE it's used. - // - // This unsafe block is necessary because the `init_heap` function must be called exactly once - // before any allocations are made. It initializes the heap allocator with the system table. unsafe { - init_heap(&system_table); + init_system_table(&system_table); } // Initialize logging with the COM2 port and set the level filter to Debug. 
From 1ba11695c08fb32f2003fa2af30a22845f10bb4c Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:51:32 +1200 Subject: [PATCH 46/87] Update Cargo.toml --- uefi/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uefi/Cargo.toml b/uefi/Cargo.toml index 6ef1820..d455d71 100644 --- a/uefi/Cargo.toml +++ b/uefi/Cargo.toml @@ -11,7 +11,7 @@ path = "src/main.rs" [dependencies] uefi = { version = "0.28.0", features = ["alloc"] } # https://crates.io/crates/uefi -uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services +#uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services log = { version = "0.4.20", default-features = false } # https://crates.io/crates/log once_cell = "1.19.0" # https://crates.io/crates/once_cell spin = "0.9" # https://crates.io/crates/spin From 59e6f9ccd8972ec4c18fd2d8b1d5c003054a44d5 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:51:33 +1200 Subject: [PATCH 47/87] Update Cargo.toml --- hypervisor/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/Cargo.toml b/hypervisor/Cargo.toml index c003208..9fed21e 100644 --- a/hypervisor/Cargo.toml +++ b/hypervisor/Cargo.toml @@ -13,7 +13,7 @@ path = "src/lib.rs" x86 = "0.52.0" # https://crates.io/crates/x86 x86_64 = "0.15.0" # https://crates.io/crates/x86_64 uefi = { version = "0.28.0", features = ["alloc"] } # https://crates.io/crates/uefi -uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services +#uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services thiserror-no-std = "2.0.2" # https://crates.io/crates/thiserror-no-std bitfield = "0.15.0" # https://crates.io/crates/bitfield bit_field = "0.10.2" # https://crates.io/crates/bit_field 
From 4c14b2a0536c0bd07e23fc27eb907937a3f3ae83 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:51:39 +1200 Subject: [PATCH 48/87] Update allocator.rs --- hypervisor/src/allocator.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 601f214..95fd76c 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -372,13 +372,13 @@ pub fn allocate_host_stack() -> *mut u8 { // Structure to store allocated memory ranges #[derive(Debug)] -struct MemoryRange { - start: usize, - size: usize, +pub struct MemoryRange { + pub start: usize, + pub size: usize, } // Global list to store allocated memory ranges -static ALLOCATED_MEMORY: Mutex> = Mutex::new(Vec::new()); +pub static ALLOCATED_MEMORY: Mutex> = Mutex::new(Vec::new()); static TOTAL_ALLOCATED_MEMORY: AtomicUsize = AtomicUsize::new(0); // Function to record an allocation From cff59204bb919f4e43708549e370cecae3ad0190 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:51:42 +1200 Subject: [PATCH 49/87] Update hook_manager.rs --- hypervisor/src/intel/hooks/hook_manager.rs | 23 +++++++++++----------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index 1830d27..7a65695 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -1,6 +1,6 @@ use { crate::{ - allocator::GLOBAL_ALLOCATOR, + allocator::ALLOCATED_MEMORY, error::HypervisorError, intel::{ addresses::PhysicalAddress, @@ -81,7 +81,7 @@ impl HookManager { /// Hides the hypervisor memory from the guest by installing EPT hooks on all allocated memory regions. 
/// - /// This function iterates through the used memory in the global allocator and calls `ept_hide_hypervisor_memory` + /// This function iterates through the recorded memory allocations and calls `ept_hide_hypervisor_memory` /// for each page to split the 2MB pages into 4KB pages and fill the shadow page with a specified value. /// It then swaps the guest page with the shadow page and sets the desired permissions. /// @@ -94,16 +94,15 @@ impl HookManager { /// /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise. pub fn hide_hypervisor_memory(vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { - // Get the used memory from the global allocator. - let used_memory = GLOBAL_ALLOCATOR.used(); - - // Get the base address of the heap. - let heap_base_address = GLOBAL_ALLOCATOR.heap_base(); - - // Iterate through the used memory and hide each page. - for offset in (0..used_memory).step_by(4096) { - let guest_page_pa = unsafe { heap_base_address.add(offset) }; - HookManager::ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa as usize).align_down_to_base_page().as_u64(), page_permissions)?; + // Lock the allocated memory list to ensure thread safety. + let allocated_memory = ALLOCATED_MEMORY.lock(); + + // Iterate through the recorded memory allocations and hide each page. 
+ for range in allocated_memory.iter() { + for offset in (0..range.size).step_by(BASE_PAGE_SIZE) { + let guest_page_pa = range.start + offset; + HookManager::ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?; + } } Ok(()) From 68bc078e676b732760162b6a8bc93bf8d868387f Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:51:44 +1200 Subject: [PATCH 50/87] Update vmm.rs --- hypervisor/src/vmm.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/hypervisor/src/vmm.rs b/hypervisor/src/vmm.rs index 5500b19..a3ebf55 100644 --- a/hypervisor/src/vmm.rs +++ b/hypervisor/src/vmm.rs @@ -77,12 +77,10 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! { Err(e) => panic!("Failed to activate VMCS: {:?}", e), } - /* match HookManager::hide_hypervisor_memory(&mut vm, AccessType::READ_WRITE_EXECUTE) { Ok(_) => debug!("Hypervisor memory hidden"), Err(e) => panic!("Failed to hide hypervisor memory: {:?}", e), }; - */ info!("Launching the VM until a vmexit occurs..."); From ff707946b57ee9bdc72d783b1517448e4f13997e Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:51:46 +1200 Subject: [PATCH 51/87] Update virtualize.rs --- uefi/src/virtualize.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs index 18ed4c1..9f83c85 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -4,14 +4,8 @@ //! 
Credits to Satoshi Tanda: https://github.com/tandasat/Hello-VT-rp/blob/main/hypervisor/src/switch_stack.rs use { - alloc::alloc::handle_alloc_error, - core::{alloc::Layout, arch::global_asm}, - hypervisor::{ - allocator::allocate_host_stack, - global_const::STACK_NUMBER_OF_PAGES, - intel::{capture::GuestRegisters, page::Page}, - vmm::start_hypervisor, - }, + core::arch::global_asm, + hypervisor::{allocator::allocate_host_stack, global_const::STACK_NUMBER_OF_PAGES, intel::capture::GuestRegisters, vmm::start_hypervisor}, log::debug, }; From 5359308e28c20d54e6a3de78a4d59c71d96d528b Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 13:59:30 +1200 Subject: [PATCH 52/87] Update allocator.rs --- hypervisor/src/allocator.rs | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 95fd76c..61cf79c 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -370,18 +370,39 @@ pub fn allocate_host_stack() -> *mut u8 { allocated_pages as *mut u8 } -// Structure to store allocated memory ranges +/// Structure to store allocated memory ranges. +/// +/// This struct is used to keep track of memory allocations by storing the +/// start address and size of each allocated memory block. #[derive(Debug)] pub struct MemoryRange { + /// The start address of the allocated memory range. pub start: usize, + /// The size of the allocated memory range. pub size: usize, } -// Global list to store allocated memory ranges +/// Global list to store allocated memory ranges. +/// +/// This global mutex-protected vector keeps track of all allocated memory ranges +/// for monitoring and debugging purposes. pub static ALLOCATED_MEMORY: Mutex> = Mutex::new(Vec::new()); + +/// Atomic counter to track the total allocated memory size. 
+/// +/// This atomic counter is incremented whenever a new memory block is allocated +/// and provides a quick way to get the total allocated memory size. static TOTAL_ALLOCATED_MEMORY: AtomicUsize = AtomicUsize::new(0); -// Function to record an allocation +/// Records an allocation by adding the memory range to the global list and updating the total allocated memory. +/// +/// This function is called whenever a new memory block is allocated. It stores the start address +/// and size of the allocated memory in the global list and updates the total allocated memory counter. +/// +/// # Arguments +/// +/// * `start` - The start address of the allocated memory range. +/// * `size` - The size of the allocated memory range. pub fn record_allocation(start: usize, size: usize) { let mut allocated_memory = ALLOCATED_MEMORY.lock(); allocated_memory.push(MemoryRange { start, size }); @@ -389,6 +410,9 @@ pub fn record_allocation(start: usize, size: usize) { } /// Prints the tracked memory allocations. +/// +/// This function iterates over all recorded memory allocations and prints the start address +/// and size of each allocated memory range. It also prints the total allocated memory size. pub fn print_tracked_allocations() { let allocated_memory = ALLOCATED_MEMORY.lock(); for range in allocated_memory.iter() { From f4c36c438f81aee865852c816bdf529809a1c8d4 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:03:01 +1200 Subject: [PATCH 53/87] Update allocator.rs --- hypervisor/src/allocator.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 61cf79c..cb05056 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -325,20 +325,26 @@ pub unsafe fn box_zeroed() -> Box { /// allocation functions. static SYSTEM_TABLE: AtomicPtr> = AtomicPtr::new(ptr::null_mut()); -/// Initializes the system table. 
+/// Initializes the system table and resets the global heap. /// -/// This function must be called before any memory allocation operations are performed. +/// This function must be called before any memory allocation operations are performed. It initializes +/// the system table reference and resets the global heap to its default state. /// /// # Safety /// /// This function is unsafe because it must be called exactly once and must be called /// before any allocations are made. /// +/// # Important +/// +/// This function must be called to ensure that the global allocator is properly initialized and reset. +/// /// # Arguments /// /// * `system_table` - A reference to the UEFI system table. -pub unsafe fn init_system_table(system_table: &SystemTable) { +pub unsafe fn initialize_system_table_and_heap(system_table: &SystemTable) { SYSTEM_TABLE.store(system_table as *const _ as *mut _, Ordering::Release); + HEAP.reset(); } /// Allocates a block of memory pages using UEFI's allocate_pages function. From 8c06645bcec3b1c211e0694900dea869ec485041 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:03:03 +1200 Subject: [PATCH 54/87] Update main.rs --- uefi/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/uefi/src/main.rs b/uefi/src/main.rs index 7e4d4a6..8f63939 100644 --- a/uefi/src/main.rs +++ b/uefi/src/main.rs @@ -12,7 +12,7 @@ extern crate alloc; use { crate::{processor::start_hypervisor_on_all_processors, relocation::zap_relocations}, hypervisor::{ - allocator::init_system_table, + allocator::initialize_system_table_and_heap, logger::{self, SerialPort}, }, log::*, @@ -58,7 +58,7 @@ fn panic_handler(info: &core::panic::PanicInfo) -> ! 
{ #[entry] fn main(_image_handle: Handle, system_table: SystemTable) -> Status { unsafe { - init_system_table(&system_table); + initialize_system_table_and_heap(&system_table); } // Initialize logging with the COM2 port and set the level filter to Debug. From 98ff7e55941671a65adb18e72206eb1fb522c303 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:22:05 +1200 Subject: [PATCH 55/87] Put Vmcs and Vmxon on stack --- hypervisor/src/intel/vm.rs | 23 ++++++++--------------- hypervisor/src/intel/vmcs.rs | 11 ++++++----- hypervisor/src/intel/vmxon.rs | 14 +++++++++++++- 3 files changed, 27 insertions(+), 21 deletions(-) diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index b854ed4..ff2cbc9 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -17,7 +17,7 @@ use { hooks::hook_manager::HookManager, page::Page, paging::PageTables, - support::{rdmsr, vmclear, vmptrld, vmread, vmxon}, + support::{vmclear, vmptrld, vmread, vmxon}, vmcs::Vmcs, vmerror::{VmInstructionError, VmxBasicExitReason}, vmlaunch::launch_vm, @@ -25,7 +25,6 @@ use { }, }, alloc::boxed::Box, - bit_field::BitField, log::*, x86::{bits64::rflags::RFlags, msr, vmx::vmcs}, }; @@ -37,10 +36,10 @@ use { /// and the state of guest registers. Additionally, it tracks whether the VM has been launched. pub struct Vm { /// The VMXON (Virtual Machine Extensions On) region for the VM. - pub vmxon_region: Box, + pub vmxon_region: Vmxon, /// The VMCS (Virtual Machine Control Structure) for the VM. - pub vmcs_region: Box, + pub vmcs_region: Vmcs, /// Descriptor tables for the guest state. 
pub guest_descriptor: Descriptors, @@ -91,10 +90,10 @@ impl Vm { trace!("Creating VM"); trace!("Allocating VMXON region"); - let vmxon_region = unsafe { box_zeroed::() }; + let vmxon_region = Vmxon::new(); trace!("Allocating VMCS region"); - let vmcs_region = unsafe { box_zeroed::() }; + let vmcs_region = Vmcs::new(); trace!("Allocating Memory for Host Paging"); let mut host_paging = unsafe { box_zeroed::() }; @@ -153,14 +152,11 @@ impl Vm { /// Returns `Ok(())` on successful activation, or an `Err(HypervisorError)` if any step in the activation process fails. pub fn activate_vmxon(&mut self) -> Result<(), HypervisorError> { trace!("Setting up VMXON region"); - self.vmxon_region.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; - self.vmxon_region.revision_id.set_bit(31, false); - self.setup_vmxon()?; trace!("VMXON region setup successfully!"); trace!("Executing VMXON instruction"); - vmxon(self.vmxon_region.as_ref() as *const _ as _); + vmxon(&self.vmxon_region as *const _ as _); trace!("VMXON executed successfully!"); Ok(()) @@ -205,15 +201,12 @@ impl Vm { /// Returns `Ok(())` on successful activation, or an `Err(HypervisorError)` if activation fails. pub fn activate_vmcs(&mut self) -> Result<(), HypervisorError> { trace!("Activating VMCS"); - self.vmcs_region.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; - self.vmcs_region.revision_id.set_bit(31, false); - // Clear the VMCS region. - vmclear(self.vmcs_region.as_ref() as *const _ as _); + vmclear(&self.vmcs_region as *const _ as _); trace!("VMCLEAR successful!"); // Load current VMCS pointer. 
- vmptrld(self.vmcs_region.as_ref() as *const _ as _); + vmptrld(&self.vmcs_region as *const _ as _); trace!("VMPTRLD successful!"); self.setup_vmcs()?; diff --git a/hypervisor/src/intel/vmcs.rs b/hypervisor/src/intel/vmcs.rs index a64ebd5..9b35cdf 100644 --- a/hypervisor/src/intel/vmcs.rs +++ b/hypervisor/src/intel/vmcs.rs @@ -19,6 +19,7 @@ use { }, }, alloc::boxed::Box, + bit_field::BitField, core::fmt, x86::{ bits64::{paging::BASE_PAGE_SIZE, rflags}, @@ -44,21 +45,21 @@ pub struct Vmcs { pub reserved: [u8; BASE_PAGE_SIZE - 8], } -impl Default for Vmcs { +impl Vmcs { /// Constructs a default `Vmcs` instance with the necessary revision ID. /// /// Initializes the VMCS with the appropriate revision identifier obtained from the IA32_VMX_BASIC MSR, /// sets the abort indicator to 0, and fills the reserved area with zeros, preparing the VMCS for use. - fn default() -> Self { + pub fn new() -> Self { + let mut revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; + revision_id.set_bit(31, false); Self { - revision_id: rdmsr(msr::IA32_VMX_BASIC) as u32, + revision_id, abort_indicator: 0, reserved: [0; BASE_PAGE_SIZE - 8], } } -} -impl Vmcs { /// Initialize the guest state for the currently loaded VMCS. /// /// The method sets up various guest state fields in the VMCS as per the diff --git a/hypervisor/src/intel/vmxon.rs b/hypervisor/src/intel/vmxon.rs index 6fc469f..f80895b 100644 --- a/hypervisor/src/intel/vmxon.rs +++ b/hypervisor/src/intel/vmxon.rs @@ -4,7 +4,7 @@ //! It covers setting up the VMXON region, adjusting necessary control registers, and handling model-specific registers to meet Intel's virtualization requirements. use { - crate::error::HypervisorError, + crate::{error::HypervisorError, intel::support::rdmsr}, bitfield::BitMut, x86::{controlregs, current::paging::BASE_PAGE_SIZE, msr}, x86_64::registers::control::Cr4, @@ -27,6 +27,18 @@ pub struct Vmxon { } impl Vmxon { + /// Constructs a default `Vmxon` instance. 
+ /// + /// Sets the revision ID to the value read from the IA32_VMX_BASIC MSR and initializes the data array to zeros, preparing the VMXON region for use. + pub fn new() -> Self { + let mut revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; + revision_id.set_bit(31, false); + Self { + revision_id, + data: [0; BASE_PAGE_SIZE - 4], + } + } + /// Enables VMX operation by setting the VMX-enable bit in CR4. /// /// Sets the CR4_VMX_ENABLE_BIT to enable VMX operations, preparing the processor to enter VMX operation mode. From dc63e809a87a0e56bd84b23ccd241dd0a9853124 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:40:06 +1200 Subject: [PATCH 56/87] Host PageTables use stack not heap (box) --- hypervisor/src/intel/paging.rs | 11 +++++++++++ hypervisor/src/intel/vm.rs | 4 ++-- hypervisor/src/intel/vmcs.rs | 3 +-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/hypervisor/src/intel/paging.rs b/hypervisor/src/intel/paging.rs index 2525943..4052924 100644 --- a/hypervisor/src/intel/paging.rs +++ b/hypervisor/src/intel/paging.rs @@ -40,6 +40,17 @@ pub struct PageTables { } impl PageTables { + /// Constructs a new `PageTables` instance with default-initialized entries. + /// + /// Initializes all entries in PML4, PDPT, and PD tables to zero, preparing the Page Tables for use. + pub fn new() -> Self { + Self { + pml4: Pml4(Table { entries: [Entry(0); 512] }), + pdpt: Pdpt(Table { entries: [Entry(0); 512] }), + pd: [Pd(Table { entries: [Entry(0); 512] }); 512], + } + } + /// Builds a basic identity map for the page tables. /// /// This setup ensures that each virtual address directly maps to the same physical address, diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index ff2cbc9..0b31c8d 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -48,7 +48,7 @@ pub struct Vm { pub host_descriptor: Descriptors, /// Paging tables for the host. 
- pub host_paging: Box, + pub host_paging: PageTables, /// The hook manager for the VM. pub hook_manager: Box, @@ -96,7 +96,7 @@ impl Vm { let vmcs_region = Vmcs::new(); trace!("Allocating Memory for Host Paging"); - let mut host_paging = unsafe { box_zeroed::() }; + let mut host_paging = PageTables::new(); trace!("Building Identity Paging for Host"); host_paging.build_identity(); diff --git a/hypervisor/src/intel/vmcs.rs b/hypervisor/src/intel/vmcs.rs index 9b35cdf..65b4e59 100644 --- a/hypervisor/src/intel/vmcs.rs +++ b/hypervisor/src/intel/vmcs.rs @@ -18,7 +18,6 @@ use { support::{cr0, cr3, rdmsr, sidt, vmread, vmwrite}, }, }, - alloc::boxed::Box, bit_field::BitField, core::fmt, x86::{ @@ -133,7 +132,7 @@ impl Vmcs { /// # Arguments /// * `host_descriptor` - Descriptor tables for the host. /// * `host_paging` - Paging tables for the host. - pub fn setup_host_registers_state(host_descriptor: &Descriptors, host_paging: &Box) -> Result<(), HypervisorError> { + pub fn setup_host_registers_state(host_descriptor: &Descriptors, host_paging: &PageTables) -> Result<(), HypervisorError> { log::debug!("Setting up Host Registers State"); let pml4_pa = host_paging.get_pml4_pa()?; From 73cdfcf0f6ed68dc638df9148b124fb1dbd87741 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:43:49 +1200 Subject: [PATCH 57/87] EPT uses stack not heap (box) anymore --- hypervisor/src/intel/ept.rs | 12 ++++++++++++ hypervisor/src/intel/vm.rs | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/hypervisor/src/intel/ept.rs b/hypervisor/src/intel/ept.rs index 38a6e04..4359660 100644 --- a/hypervisor/src/intel/ept.rs +++ b/hypervisor/src/intel/ept.rs @@ -39,6 +39,18 @@ pub struct Ept { } impl Ept { + /// Constructs a new `Ept` instance with default-initialized entries. + /// + /// Initializes all entries in PML4, PDPT, PD, and PT tables to zero, preparing the EPT for use. 
+ pub fn new() -> Self { + Self { + pml4: Pml4(Table { entries: [Entry(0); 512] }), + pdpt: Pdpt(Table { entries: [Entry(0); 512] }), + pd: [Pd(Table { entries: [Entry(0); 512] }); 512], + pt: Pt(Table { entries: [Entry(0); 512] }), + } + } + /// Builds an identity-mapped Extended Page Table (EPT) structure with considerations for Memory Type Range Registers (MTRR). /// This function initializes the EPT with a 1:1 physical-to-virtual memory mapping, /// setting up the required PML4, PDPT, and PD entries for the initial memory range. diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index 0b31c8d..ce89203 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -57,7 +57,7 @@ pub struct Vm { pub msr_bitmap: Box, /// The primary EPT (Extended Page Tables) for the VM. - pub primary_ept: Box, + pub primary_ept: Ept, /// The primary EPTP (Extended Page Tables Pointer) for the VM. pub primary_eptp: u64, @@ -105,7 +105,7 @@ impl Vm { let mut msr_bitmap = unsafe { box_zeroed::() }; trace!("Allocating Primary EPT"); - let mut primary_ept = unsafe { box_zeroed::() }; + let mut primary_ept = Ept::new(); trace!("Identity Mapping Primary EPT"); primary_ept.build_identity()?; From 83aa1146b840c50626e960d9aa7ab7903a9880d1 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:46:09 +1200 Subject: [PATCH 58/87] MsrBitMap uses stack no more heap (box) --- hypervisor/src/intel/bitmap.rs | 14 ++++++++++++++ hypervisor/src/intel/vm.rs | 6 +++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/hypervisor/src/intel/bitmap.rs b/hypervisor/src/intel/bitmap.rs index 721c548..57040b7 100644 --- a/hypervisor/src/intel/bitmap.rs +++ b/hypervisor/src/intel/bitmap.rs @@ -47,6 +47,20 @@ pub struct MsrBitmap { } impl MsrBitmap { + /// Creates a new MSR bitmap with all bits cleared. + /// + /// # Returns + /// + /// * A `MsrBitmap` instance with all bits initialized to zero. 
+ pub fn new() -> Self { + Self { + read_low_msrs: [0; 0x400], + read_high_msrs: [0; 0x400], + write_low_msrs: [0; 0x400], + write_high_msrs: [0; 0x400], + } + } + /// Modifies the interception for a specific MSR based on the specified operation and access type. /// /// # Arguments diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index ce89203..8401a04 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -54,7 +54,7 @@ pub struct Vm { pub hook_manager: Box, /// A bitmap for handling MSRs. - pub msr_bitmap: Box, + pub msr_bitmap: MsrBitmap, /// The primary EPT (Extended Page Tables) for the VM. pub primary_ept: Ept, @@ -102,7 +102,7 @@ impl Vm { host_paging.build_identity(); trace!("Allocating MSR Bitmap"); - let mut msr_bitmap = unsafe { box_zeroed::() }; + let mut msr_bitmap = MsrBitmap::new(); trace!("Allocating Primary EPT"); let mut primary_ept = Ept::new(); @@ -225,7 +225,7 @@ impl Vm { trace!("Setting up VMCS"); let primary_eptp = self.primary_eptp; - let msr_bitmap = self.msr_bitmap.as_ref() as *const _ as u64; + let msr_bitmap = &self.msr_bitmap as *const _ as u64; Vmcs::setup_guest_registers_state(&self.guest_descriptor, &self.guest_registers); Vmcs::setup_host_registers_state(&self.host_descriptor, &self.host_paging)?; From ed68721ceaec902811b10f206f573601f46528d5 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:49:50 +1200 Subject: [PATCH 59/87] Use stack --- hypervisor/src/intel/descriptor.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/hypervisor/src/intel/descriptor.rs b/hypervisor/src/intel/descriptor.rs index 38de3d3..4dadb43 100644 --- a/hypervisor/src/intel/descriptor.rs +++ b/hypervisor/src/intel/descriptor.rs @@ -7,7 +7,7 @@ use { crate::intel::support::sgdt, - alloc::{boxed::Box, vec::Vec}, + alloc::vec::Vec, x86::{ dtables::DescriptorTablePointer, segmentation::{ @@ -157,7 +157,7 @@ impl Descriptors { /// 
/// A slice of the GDT entries represented as `u64` values. pub fn from_pointer(pointer: &DescriptorTablePointer) -> &[u64] { - unsafe { core::slice::from_raw_parts(pointer.base.cast::(), (pointer.limit + 1) as usize / core::mem::size_of::()) } + unsafe { core::slice::from_raw_parts(pointer.base.cast::(), (pointer.limit + 1) as usize / size_of::()) } } } @@ -180,7 +180,7 @@ pub struct TaskStateSegment { /// The actual TSS data. #[allow(dead_code)] #[derivative(Debug = "ignore")] - segment: Box, + segment: TaskStateSegmentRaw, } /// Initializes a default TSS. @@ -192,10 +192,11 @@ pub struct TaskStateSegment { /// A default `TaskStateSegment` instance. impl Default for TaskStateSegment { fn default() -> Self { - let segment = Box::new(TaskStateSegmentRaw([0; 104])); + let segment = TaskStateSegmentRaw([0; 104]); + let base = &segment as *const TaskStateSegmentRaw as u64; Self { - base: segment.as_ref() as *const _ as u64, - limit: core::mem::size_of_val(segment.as_ref()) as u64 - 1, + base, + limit: size_of_val(&segment) as u64 - 1, ar: 0x8b00, segment, } From ab9756da99df1bf2260d1dc2debca5fec06f9f8a Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 14:50:56 +1200 Subject: [PATCH 60/87] Stack size needs to be bigger --- hypervisor/src/global_const.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 841ad42..89f18b8 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -4,7 +4,7 @@ use uefi::table::boot::MemoryType; pub const HEAP_SIZE: usize = 0x180000; /// The size of the stack in bytes. 
-pub const STACK_NUMBER_OF_PAGES: usize = 0x20; +pub const STACK_NUMBER_OF_PAGES: usize = 0x80; /// The memory type for the stack allocated pages pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; From 4f81ed0f7b4623e5f46ee1de26c3c646584700d5 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 15:04:35 +1200 Subject: [PATCH 61/87] Added mutex --- hypervisor/src/allocator.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index cb05056..8808317 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -29,6 +29,9 @@ pub static mut HEAP: ListHeap = ListHeap::new(); #[repr(align(0x10))] pub struct ListHeap(core::mem::MaybeUninit<[u8; SIZE]>); +/// Static mutex to ensure thread safety during allocation and deallocation. +static ALLOCATOR_MUTEX: Mutex<()> = Mutex::new(()); + impl ListHeap { /// Creates a new, uninitialized ListHeap. /// @@ -192,6 +195,8 @@ unsafe impl GlobalAlloc for ListHeap { /// /// A pointer to the allocated memory. unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety + let mut link = self.first_link(); // The required alignment and size for this type @@ -247,6 +252,8 @@ unsafe impl GlobalAlloc for ListHeap { if ptr.is_null() { return; } + let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety + let link = &mut *(ptr.sub(size_of::()) as *mut Link); // Sanity check, don't deallocate the last link @@ -277,6 +284,8 @@ unsafe impl GlobalAlloc for ListHeap { /// /// A pointer to the reallocated memory. 
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety + let link = &mut *(ptr.sub(size_of::()) as *mut Link); // Just resize the buffer @@ -360,6 +369,8 @@ pub unsafe fn initialize_system_table_and_heap(system_table: &SystemTable) /// /// This function will panic if memory allocation fails. pub fn allocate_host_stack() -> *mut u8 { + let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety + // Get the system table and boot services let system_table = SYSTEM_TABLE.load(Ordering::Acquire); let boot_services = unsafe { &(*system_table).boot_services() }; From 575a1f1c04ac6f910fcdb2b2e1350085529e4a0c Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 15:13:15 +1200 Subject: [PATCH 62/87] KernelHook, Memory & Hook Manger on stack --- hypervisor/src/intel/hooks/hook_manager.rs | 15 +++++++-------- hypervisor/src/intel/vm.rs | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index 7a65695..185e670 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -15,7 +15,6 @@ use { }, windows::kernel::KernelHook, }, - alloc::boxed::Box, core::intrinsics::copy_nonoverlapping, log::*, x86::bits64::paging::{PAddr, BASE_PAGE_SIZE}, @@ -38,10 +37,10 @@ pub enum EptHookType { #[derive(Debug, Clone)] pub struct HookManager { /// The memory manager instance for the pre-allocated shadow pages and page tables. - pub memory_manager: Box, + pub memory_manager: MemoryManager, /// The hook instance for the Windows kernel, storing the VA and PA of ntoskrnl.exe. This is retrieved from the first LSTAR_MSR write operation, intercepted by the hypervisor. - pub kernel_hook: Option>, + pub kernel_hook: Option, /// A flag indicating whether the CPUID cache information has been called. 
This will be used to perform hooks at boot time when SSDT has been initialized. /// KiSetCacheInformation -> KiSetCacheInformationIntel -> KiSetStandardizedCacheInformation -> __cpuid(4, 0) @@ -64,19 +63,19 @@ impl HookManager { /// /// # Returns /// A result containing a boxed `HookManager` instance or an error of type `HypervisorError`. - pub fn new() -> Result, HypervisorError> { + pub fn new() -> Result { trace!("Initializing hook manager"); - let memory_manager = Box::new(MemoryManager::new()?); - let kernel_hook = Some(Box::new(KernelHook::new()?)); + let memory_manager = MemoryManager::new()?; + let kernel_hook = Some(KernelHook::new()?); - Ok(Box::new(Self { + Ok(Self { memory_manager, has_cpuid_cache_info_been_called: false, kernel_hook, old_rflags: None, mtf_counter: None, - })) + }) } /// Hides the hypervisor memory from the guest by installing EPT hooks on all allocated memory regions. diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index 8401a04..fdcc7ce 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -51,7 +51,7 @@ pub struct Vm { pub host_paging: PageTables, /// The hook manager for the VM. - pub hook_manager: Box, + pub hook_manager: HookManager, /// A bitmap for handling MSRs. 
pub msr_bitmap: MsrBitmap, From 79a37f6566b884c94ccaa2191d84b4bef65a68c6 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 15:17:43 +1200 Subject: [PATCH 63/87] Dummy Page no longer boxed --- hypervisor/src/intel/hooks/hook_manager.rs | 2 +- hypervisor/src/intel/page.rs | 18 ++++++++++++++++++ hypervisor/src/intel/vm.rs | 11 ++++------- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index 185e670..cf06d5d 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -126,7 +126,7 @@ impl HookManager { let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); - let dummy_page_pa = vm.dummy_page_pa; + let dummy_page_pa = vm.dummy_page.as_ptr() as u64; trace!("Dummy page PA: {:#x}", dummy_page_pa); trace!("Mapping large page"); diff --git a/hypervisor/src/intel/page.rs b/hypervisor/src/intel/page.rs index a5eedd1..0f9c4b9 100644 --- a/hypervisor/src/intel/page.rs +++ b/hypervisor/src/intel/page.rs @@ -49,4 +49,22 @@ impl Page { pub fn size() -> usize { BASE_PAGE_SIZE } + + /// Returns a pointer to the page buffer. + /// + /// # Returns + /// + /// * `*const u8` - A pointer to the page buffer. + pub fn as_ptr(&self) -> *const u8 { + self.0.as_ptr() + } + + /// Returns a mutable pointer to the page buffer. + /// + /// # Returns + /// + /// * `*mut u8` - A mutable pointer to the page buffer. 
+ pub fn as_mut_ptr(&mut self) -> *mut u8 { + self.0.as_mut_ptr() + } } diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index fdcc7ce..c4e45b2 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -7,7 +7,6 @@ use { crate::{ - allocator::box_zeroed, error::HypervisorError, intel::{ bitmap::{MsrAccessType, MsrBitmap, MsrOperation}, @@ -24,7 +23,6 @@ use { vmxon::Vmxon, }, }, - alloc::boxed::Box, log::*, x86::{bits64::rflags::RFlags, msr, vmx::vmcs}, }; @@ -68,8 +66,8 @@ pub struct Vm { /// Flag indicating if the VM has been launched. pub has_launched: bool, - /// Physical address of a dummy page. - pub dummy_page_pa: u64, + /// The dummy page to use for hooking. + pub dummy_page: Page, } impl Vm { @@ -120,8 +118,7 @@ impl Vm { let hook_manager = HookManager::new()?; trace!("Creating dummy page filled with 0xffs"); - let dummy_page = unsafe { box_zeroed::() }; - let dummy_page_pa = Box::into_raw(dummy_page) as u64; + let dummy_page = Page::new(); trace!("VM created"); @@ -137,7 +134,7 @@ impl Vm { primary_eptp, guest_registers: guest_registers.clone(), has_launched: false, - dummy_page_pa, + dummy_page, }) } From 45e34153a149c0cd97350e3476e76f9cfee310f4 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 15:25:59 +1200 Subject: [PATCH 64/87] Fill dummy page --- hypervisor/src/intel/page.rs | 11 +++++++++++ hypervisor/src/intel/vm.rs | 3 ++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/hypervisor/src/intel/page.rs b/hypervisor/src/intel/page.rs index 0f9c4b9..90153e3 100644 --- a/hypervisor/src/intel/page.rs +++ b/hypervisor/src/intel/page.rs @@ -67,4 +67,15 @@ impl Page { pub fn as_mut_ptr(&mut self) -> *mut u8 { self.0.as_mut_ptr() } + + /// Fills the page with a specified byte value. + /// + /// # Arguments + /// + /// * `value` - The byte value to fill the page with. 
+ pub fn fill(&mut self, value: u8) { + for byte in self.0.iter_mut() { + *byte = value; + } + } } diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index c4e45b2..f79f4ec 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -118,7 +118,8 @@ impl Vm { let hook_manager = HookManager::new()?; trace!("Creating dummy page filled with 0xffs"); - let dummy_page = Page::new(); + let mut dummy_page = Page::new(); + dummy_page.fill(0xff); trace!("VM created"); From 2f02dd56696f973392227e36638f07cc4319c622 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 15:35:39 +1200 Subject: [PATCH 65/87] Dummy page is already filled --- hypervisor/src/intel/hooks/hook_manager.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index cf06d5d..8767758 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -1,6 +1,6 @@ use { crate::{ - allocator::ALLOCATED_MEMORY, + allocator::{print_tracked_allocations, ALLOCATED_MEMORY}, error::HypervisorError, intel::{ addresses::PhysicalAddress, @@ -93,6 +93,9 @@ impl HookManager { /// /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise. pub fn hide_hypervisor_memory(vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { + // Print the tracked memory allocations for debugging purposes. + print_tracked_allocations(); + // Lock the allocated memory list to ensure thread safety. let allocated_memory = ALLOCATED_MEMORY.lock(); @@ -132,10 +135,7 @@ impl HookManager { trace!("Mapping large page"); // Map the large page to the pre-allocated page table, if it hasn't been mapped already. 
vm.hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; - - trace!("Filling shadow page with 0xff"); - Self::unsafe_fill_shadow_page(PAddr::from(dummy_page_pa), 0xff); - + let pre_alloc_pt = vm .hook_manager .memory_manager From 8b69ae677d1e7f989a26693f7003a317f0f8e100 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 17:04:32 +1200 Subject: [PATCH 66/87] Removed heapless. GlobalAlloc Heap is used now. --- hypervisor/Cargo.toml | 1 - hypervisor/src/global_const.rs | 8 +- hypervisor/src/intel/hooks/hook_manager.rs | 4 +- hypervisor/src/intel/hooks/memory_manager.rs | 131 ++++++------------- uefi/Cargo.toml | 1 - 5 files changed, 45 insertions(+), 100 deletions(-) diff --git a/hypervisor/Cargo.toml b/hypervisor/Cargo.toml index 9fed21e..6d555c9 100644 --- a/hypervisor/Cargo.toml +++ b/hypervisor/Cargo.toml @@ -27,5 +27,4 @@ bstr = { version = "1.9.0", default-features = false } # https://crates.io/crate derivative = { version = "2.2.0", features = ["use_core"]} # https://crates.io/crates/derivative spin = "0.9" # https://crates.io/crates/spin lde = "0.3.0" # https://crates.io/crates/lde -heapless = "0.8.0" # https://crates.io/crates/heapless shared = { path = "../shared" } \ No newline at end of file diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 89f18b8..500a6e3 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -4,13 +4,7 @@ use uefi::table::boot::MemoryType; pub const HEAP_SIZE: usize = 0x180000; /// The size of the stack in bytes. -pub const STACK_NUMBER_OF_PAGES: usize = 0x80; +pub const STACK_NUMBER_OF_PAGES: usize = 0x4000; /// The memory type for the stack allocated pages pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; - -/// The maximum number of hooks supported by the hypervisor. Change this value as needed. 
-pub const MAX_HOOK_ENTRIES: usize = 64; - -/// The maximum number of hooks per page supported by the hypervisor. Change this value as needed. -pub const MAX_HOOKS_PER_PAGE: usize = 16; diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index 8767758..ee70be4 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -66,7 +66,7 @@ impl HookManager { pub fn new() -> Result { trace!("Initializing hook manager"); - let memory_manager = MemoryManager::new()?; + let memory_manager = MemoryManager::new(); let kernel_hook = Some(KernelHook::new()?); Ok(Self { @@ -135,7 +135,7 @@ impl HookManager { trace!("Mapping large page"); // Map the large page to the pre-allocated page table, if it hasn't been mapped already. vm.hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; - + let pre_alloc_pt = vm .hook_manager .memory_manager diff --git a/hypervisor/src/intel/hooks/memory_manager.rs b/hypervisor/src/intel/hooks/memory_manager.rs index 6404f4d..f6f553d 100644 --- a/hypervisor/src/intel/hooks/memory_manager.rs +++ b/hypervisor/src/intel/hooks/memory_manager.rs @@ -1,25 +1,27 @@ //! Module for managing memory allocations related to Extended Page Tables (EPT) -//! for a hypervisor. Provides pre-allocated memory resources for EPT hooks and -//! management functionalities to maintain and access these resources effectively. +//! for a hypervisor. Provides memory resources for EPT hooks and management functionalities +//! to maintain and access these resources effectively. 
use { crate::{ allocator::box_zeroed, error::HypervisorError, - global_const::{MAX_HOOKS_PER_PAGE, MAX_HOOK_ENTRIES}, intel::{ept::Pt, hooks::hook_manager::EptHookType, page::Page}, }, - alloc::boxed::Box, - heapless::{LinearMap, Vec}, - log::{error, trace}, + alloc::{boxed::Box, collections::BTreeMap, vec::Vec}, + log::trace, }; /// Represents the hook information for a specific guest virtual address and EPT hook type. #[derive(Debug, Clone)] pub struct HookInfo { + /// Guest virtual address of the function to be hooked. pub guest_function_va: u64, + /// Guest physical address of the function to be hooked. pub guest_function_pa: u64, + /// Type of EPT hook to be applied. pub ept_hook_type: EptHookType, + /// Hash of the function to be hooked. pub function_hash: u32, } @@ -28,72 +30,32 @@ pub struct HookInfo { pub struct HookMapping { /// The shadow page. pub shadow_page: Box, - /// The list of hooks associated with this page. - pub hooks: Vec, + pub hooks: Vec, } -/// Represents a memory management system that pre-allocates and manages page tables -/// and shadow pages for a hypervisor, using fixed-size arrays to avoid runtime allocation. +/// Represents a memory management system that manages page tables and shadow pages +/// for a hypervisor, allocating memory as needed at runtime. #[derive(Debug, Clone)] pub struct MemoryManager { /// Active mappings of guest physical addresses to their respective hook mappings. - active_mappings: LinearMap, - + active_mappings: BTreeMap, /// Mappings of large guest physical addresses to their respective page tables. - large_pt_mappings: LinearMap, MAX_HOOK_ENTRIES>, - - /// Free slots for hook mappings. - free_slots_hm: Vec, - - /// Free slots for page tables. - free_slots_pt: Vec, + large_pt_mappings: BTreeMap>, } impl MemoryManager { - /// Constructs a new `MemoryManager` instance, pre-allocating all necessary resources. + /// Constructs a new `MemoryManager` instance. 
/// /// # Returns - /// A new instance of `MemoryManager` or an error if initial allocation fails. - pub fn new() -> Result { + /// A new instance of `MemoryManager`. + pub fn new() -> Self { trace!("Initializing memory manager"); - let mut active_mappings = LinearMap::::new(); - let mut large_pt_mappings = LinearMap::, MAX_HOOK_ENTRIES>::new(); - let mut free_slots_hm = Vec::::new(); - let mut free_slots_pt = Vec::::new(); - - trace!("Pre-allocating shadow pages and page tables"); - - // Pre-allocate shadow pages for hooks and page tables for large pages. - for i in 0..MAX_HOOK_ENTRIES { - let sp = unsafe { box_zeroed::() }; - - active_mappings - .insert( - i as u64, - HookMapping { - shadow_page: sp, - hooks: Vec::::new(), - }, - ) - .map_err(|_| HypervisorError::ActiveMappingError)?; - - let pt = unsafe { box_zeroed::() }; - large_pt_mappings.insert(i as u64, pt).map_err(|_| HypervisorError::LargePtMappingError)?; - - free_slots_hm.push(i).map_err(|_| HypervisorError::ActiveMappingError)?; - free_slots_pt.push(i).map_err(|_| HypervisorError::LargePtMappingError)?; + Self { + active_mappings: BTreeMap::new(), + large_pt_mappings: BTreeMap::new(), } - - trace!("Memory manager initialized"); - - Ok(Self { - active_mappings, - large_pt_mappings, - free_slots_hm, - free_slots_pt, - }) } /// Checks if a guest page is already processed (split and copied). @@ -107,8 +69,7 @@ impl MemoryManager { self.active_mappings.contains_key(&guest_page_pa) } - /// Maps a free page table and shadow page to a guest physical address, removing them from the free pool. - /// Maps the Large Page to the Page Table if not already mapped. + /// Maps a shadow page to a guest physical address and adds hook information, allocating memory as needed. /// /// # Arguments /// * `guest_page_pa` - The guest physical address to map. 
@@ -128,6 +89,7 @@ impl MemoryManager { function_hash: u32, ) -> Result<(), HypervisorError> { trace!("Mapping guest page and shadow page for PA: {:#x}", guest_page_pa); + let hook_info = HookInfo { guest_function_va, guest_function_pa, @@ -137,48 +99,39 @@ impl MemoryManager { if let Some(mapping) = self.active_mappings.get_mut(&guest_page_pa) { trace!("Mapping already exists, adding hook info"); - mapping.hooks.push(hook_info).map_err(|_| HypervisorError::TooManyHooks)?; - } else { - trace!("Mapping does not exist, creating new mapping"); - if let Some(free_slot) = self.free_slots_hm.pop() { - trace!("Found free slot at index: {}", free_slot); - let key = free_slot as u64; - let mut mapping = self.active_mappings.remove(&key).unwrap(); - mapping.hooks.push(hook_info).map_err(|_| HypervisorError::TooManyHooks)?; - self.active_mappings - .insert(guest_page_pa, mapping) - .map_err(|_| HypervisorError::ActiveMappingError)?; - trace!("Guest page mapped to shadow page successfully"); + if mapping.hooks.iter().any(|hook| hook.guest_function_pa == guest_function_pa) { + trace!("Hook already exists for function PA: {:#x}", guest_function_pa); } else { - error!("No free pages available for mapping"); - return Err(HypervisorError::OutOfMemory); + mapping.hooks.push(hook_info); } + } else { + trace!("Mapping does not exist, creating new mapping"); + let shadow_page = unsafe { box_zeroed::() }; + let mut hooks = Vec::new(); + hooks.push(hook_info); + + self.active_mappings.insert(guest_page_pa, HookMapping { shadow_page, hooks }); + trace!("Guest page mapped to shadow page successfully"); } Ok(()) } - /// Maps a free page table to a large guest physical address, removing it from the free pool. + /// Maps a free page table to a large guest physical address, allocating memory as needed. /// /// # Arguments - /// /// * `guest_large_page_pa` - The large guest physical address to map. 
+ /// + /// # Returns + /// `Ok(())` if successful, or an error if no free page tables are available. pub fn map_large_page_to_pt(&mut self, guest_large_page_pa: u64) -> Result<(), HypervisorError> { - // Ensure the large page has a page table (Pt) if !self.large_pt_mappings.contains_key(&guest_large_page_pa) { trace!("Large page not mapped to page table, mapping now"); - if let Some(free_slot) = self.free_slots_pt.pop() { - trace!("Found free slot for page table at index: {}", free_slot); - let pt_key = free_slot as u64; - let pt = self.large_pt_mappings.remove(&pt_key).unwrap(); - self.large_pt_mappings - .insert(guest_large_page_pa, pt) - .map_err(|_| HypervisorError::ActiveMappingError)?; - trace!("Large page mapped to page table successfully"); - } else { - error!("No free page tables available for mapping"); - return Err(HypervisorError::OutOfMemory); - } + let pt = unsafe { box_zeroed::() }; + self.large_pt_mappings.insert(guest_large_page_pa, pt); + trace!("Large page mapped to page table successfully"); + } else { + trace!("Large page PA: {:#x} is already mapped to a page table", guest_large_page_pa); } Ok(()) @@ -215,7 +168,7 @@ impl MemoryManager { /// /// # Returns /// An `Option` containing a reference to the `HookInfo` if found. 
- pub fn get_hook_info(&self, guest_page_pa: u64) -> Option<&Vec> { + pub fn get_hook_info(&self, guest_page_pa: u64) -> Option<&Vec> { self.active_mappings.get(&guest_page_pa).map(|mapping| &mapping.hooks) } diff --git a/uefi/Cargo.toml b/uefi/Cargo.toml index d455d71..bc9107b 100644 --- a/uefi/Cargo.toml +++ b/uefi/Cargo.toml @@ -16,5 +16,4 @@ log = { version = "0.4.20", default-features = false } # https://crates.io/crate once_cell = "1.19.0" # https://crates.io/crates/once_cell spin = "0.9" # https://crates.io/crates/spin com_logger = "0.1.1" # https://crates.io/crates/com_logger -heapless = "0.8.0" # https://crates.io/crates/heapless hypervisor = { path = "../hypervisor" } \ No newline at end of file From 3e853e4eec62d27ba92d049cd60fee3201e3c993 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 18:44:32 +1200 Subject: [PATCH 67/87] Update global_const.rs --- hypervisor/src/global_const.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 500a6e3..72c674f 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -4,7 +4,7 @@ use uefi::table::boot::MemoryType; pub const HEAP_SIZE: usize = 0x180000; /// The size of the stack in bytes. 
-pub const STACK_NUMBER_OF_PAGES: usize = 0x4000; +pub const STACK_NUMBER_OF_PAGES: usize = 0x1000; /// The memory type for the stack allocated pages pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; From 795f889a046aea8a50f4293fd0f61c7296420db7 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 18:45:04 +1200 Subject: [PATCH 68/87] Create only 1 dummy page for all cores --- hypervisor/src/intel/hooks/hook_manager.rs | 12 +++- hypervisor/src/intel/vm.rs | 9 --- uefi/src/relocation.rs | 47 ------------- uefi/src/setup.rs | 82 ++++++++++++++++++++++ uefi/src/virtualize.rs | 3 +- 5 files changed, 94 insertions(+), 59 deletions(-) delete mode 100644 uefi/src/relocation.rs create mode 100644 uefi/src/setup.rs diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index ee70be4..c924929 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -15,11 +15,18 @@ use { }, windows::kernel::KernelHook, }, - core::intrinsics::copy_nonoverlapping, + core::{ + intrinsics::copy_nonoverlapping, + sync::atomic::{AtomicU64, Ordering}, + }, log::*, x86::bits64::paging::{PAddr, BASE_PAGE_SIZE}, }; +/// Global variable to store the address of the created dummy page. +/// This variable can be accessed by multiple cores/threads/processors. +pub static DUMMY_PAGE_ADDRESS: AtomicU64 = AtomicU64::new(0); + /// Enum representing different types of hooks that can be applied. 
#[derive(Debug, Clone, Copy)] pub enum EptHookType { @@ -129,7 +136,8 @@ impl HookManager { let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); - let dummy_page_pa = vm.dummy_page.as_ptr() as u64; + let dummy_page_pa = DUMMY_PAGE_ADDRESS.load(Ordering::SeqCst); + trace!("Dummy page PA: {:#x}", dummy_page_pa); trace!("Mapping large page"); diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index f79f4ec..d27b6bd 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -14,7 +14,6 @@ use { descriptor::Descriptors, ept::Ept, hooks::hook_manager::HookManager, - page::Page, paging::PageTables, support::{vmclear, vmptrld, vmread, vmxon}, vmcs::Vmcs, @@ -65,9 +64,6 @@ pub struct Vm { /// Flag indicating if the VM has been launched. pub has_launched: bool, - - /// The dummy page to use for hooking. - pub dummy_page: Page, } impl Vm { @@ -117,10 +113,6 @@ impl Vm { trace!("Creating EPT hook manager"); let hook_manager = HookManager::new()?; - trace!("Creating dummy page filled with 0xffs"); - let mut dummy_page = Page::new(); - dummy_page.fill(0xff); - trace!("VM created"); Ok(Self { @@ -135,7 +127,6 @@ impl Vm { primary_eptp, guest_registers: guest_registers.clone(), has_launched: false, - dummy_page, }) } diff --git a/uefi/src/relocation.rs b/uefi/src/relocation.rs deleted file mode 100644 index 6f2e489..0000000 --- a/uefi/src/relocation.rs +++ /dev/null @@ -1,47 +0,0 @@ -//! Provides functionality to nullify the relocation table of a loaded UEFI image, -//! preventing UEFI from relocating hypervisor code during the transition from -//! physical to virtual addressing. Useful for ensuring stable memory layout in hypervisor development. -//! 
Credits Satoshi Tanda: https://github.com/tandasat/Hello-VT-rp/blob/main/hypervisor/src/switch_stack.rs - -use { - hypervisor::allocator::record_allocation, - log::debug, - uefi::{prelude::BootServices, proto::loaded_image::LoadedImage}, -}; - -/// Nullifies the relocation table of the loaded UEFI image to prevent relocation. -/// -/// This function manipulates the loaded image's PE header to zero out the relocation table, -/// preventing UEFI from applying patches to the hypervisor code during the transition -/// from physical-mode to virtual-mode addressing by the operating system. -/// -/// # Arguments -/// -/// * `system_table` - Reference to the UEFI System Table. -/// -/// # Returns -/// -/// The result of the operation. Returns `uefi::Result::SUCCESS` on success, or an error -pub fn zap_relocations(boot_service: &BootServices) -> uefi::Result<()> { - // Obtain the current loaded image protocol. - let loaded_image = boot_service.open_protocol_exclusive::(boot_service.image_handle())?; - - // Extract the image base address and size. - let (image_base, image_size) = loaded_image.info(); - let image_base = image_base as usize; - let image_range = image_base..image_base + image_size as usize; - record_allocation(image_base, image_size as usize); - - // Log the image base address range for debugging purposes. - debug!("Image base: {:#x?}", image_range); - - // Unsafe block to directly modify the PE header of the loaded image. - // This operation nullifies the relocation table to prevent UEFI from - // applying relocations to the hypervisor code. - unsafe { - *((image_base + 0x128) as *mut u32) = 0; // Zero out the relocation table offset. - *((image_base + 0x12c) as *mut u32) = 0; // Zero out the relocation table size. - } - - Ok(()) -} diff --git a/uefi/src/setup.rs b/uefi/src/setup.rs new file mode 100644 index 0000000..598086e --- /dev/null +++ b/uefi/src/setup.rs @@ -0,0 +1,82 @@ +//! 
Provides functionality to nullify the relocation table of a loaded UEFI image, +//! preventing UEFI from relocating hypervisor code during the transition from +//! physical to virtual addressing. This is useful for ensuring a stable memory layout in hypervisor development. + +use { + alloc::boxed::Box, + core::sync::atomic::Ordering, + hypervisor::{ + allocator::{box_zeroed, record_allocation}, + intel::{hooks::hook_manager::DUMMY_PAGE_ADDRESS, page::Page}, + }, + log::debug, + uefi::{prelude::BootServices, proto::loaded_image::LoadedImage}, +}; + +/// Sets up the hypervisor by recording the image base, creating a dummy page, +/// and nullifying the relocation table. +/// +/// # Arguments +/// +/// * `boot_services` - A reference to the UEFI boot services table. +/// +/// # Returns +/// +/// Returns a `uefi::Result` indicating success or failure. +pub fn setup(boot_services: &BootServices) -> uefi::Result<()> { + let loaded_image = boot_services.open_protocol_exclusive::(boot_services.image_handle())?; + record_image_base(&loaded_image); + create_dummy_page(0xFF); + let image_base = loaded_image.info().0 as u64; + zap_relocations(image_base); + Ok(()) +} + +/// Records the base address and size of the loaded UEFI image. +/// +/// This function retrieves the base address and size of the loaded UEFI image +/// and records this information for memory tracking purposes. +/// +/// # Arguments +/// +/// * `loaded_image` - A reference to the loaded UEFI image. +pub fn record_image_base(loaded_image: &LoadedImage) { + let (image_base, image_size) = loaded_image.info(); + let image_range = image_base as usize..(image_base as usize + image_size as usize); + debug!("Loaded image base: {:#x?}", image_range); + record_allocation(image_base as usize, image_size as usize); +} + +/// Creates a dummy page filled with a specific byte value. +/// +/// This function allocates a page of memory and fills it with a specified byte value. 
+/// The address of the dummy page is stored in a global variable for access by multiple cores/threads/processors. +/// +/// # Arguments +/// +/// * `fill_byte` - The byte value to fill the page with. +pub fn create_dummy_page(fill_byte: u8) { + let mut dummy_page = unsafe { box_zeroed::() }; + dummy_page.0.iter_mut().for_each(|byte| *byte = fill_byte); + let dummy_page_pa = Box::into_raw(dummy_page) as u64; + DUMMY_PAGE_ADDRESS.store(dummy_page_pa, Ordering::SeqCst); +} + +/// Nullifies the relocation table of the loaded UEFI image to prevent relocation. +/// +/// This function modifies the loaded image's PE header to zero out the relocation table, +/// preventing UEFI from applying patches to the hypervisor code during the transition +/// from physical to virtual addressing by the operating system. +/// +/// # Arguments +/// +/// * `image_base` - The base address of the loaded UEFI image. +pub fn zap_relocations(image_base: u64) { + // Unsafe block to directly modify the PE header of the loaded image. + // This operation nullifies the relocation table to prevent UEFI from + // applying relocations to the hypervisor code. + unsafe { + *((image_base + 0x128) as *mut u32) = 0; // Zero out the relocation table offset. + *((image_base + 0x12c) as *mut u32) = 0; // Zero out the relocation table size. + } +} diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs index 9f83c85..75192d0 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -18,7 +18,8 @@ pub fn virtualize_system(guest_registers: &GuestRegisters) -> ! 
{ debug!("Allocating stack space for host"); let host_stack = allocate_host_stack() as usize; - debug!("Stack range: {:#x?}", host_stack..STACK_NUMBER_OF_PAGES); + let range = host_stack..(host_stack + STACK_NUMBER_OF_PAGES * 4096); + debug!("Host stack allocated at {:#x?}", range); unsafe { switch_stack(guest_registers, start_hypervisor as usize, host_stack as _) }; } From 72a3e8f1aed91d31cf979ff620f5b30ec0ca352e Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 18:45:08 +1200 Subject: [PATCH 69/87] Update main.rs --- uefi/src/main.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/uefi/src/main.rs b/uefi/src/main.rs index 8f63939..abbdec1 100644 --- a/uefi/src/main.rs +++ b/uefi/src/main.rs @@ -10,7 +10,7 @@ extern crate alloc; use { - crate::{processor::start_hypervisor_on_all_processors, relocation::zap_relocations}, + crate::{processor::start_hypervisor_on_all_processors, setup::setup}, hypervisor::{ allocator::initialize_system_table_and_heap, logger::{self, SerialPort}, @@ -20,7 +20,7 @@ use { }; pub mod processor; -pub mod relocation; +pub mod setup; pub mod virtualize; /// Custom panic handler for the UEFI application. @@ -68,10 +68,10 @@ fn main(_image_handle: Handle, system_table: SystemTable) -> Status { let boot_services = system_table.boot_services(); - // Attempt to zap relocations in the UEFI environment. 
- debug!("Zapping relocations"); - if let Err(e) = zap_relocations(boot_services) { - error!("Failed to zap relocations: {:?}", e); + // Set up the hypervisor + debug!("Setting up the hypervisor"); + if let Err(e) = setup(boot_services) { + error!("Failed to set up the hypervisor: {:?}", e); return Status::ABORTED; } From b58ebedab4d6cfb5e097909b7fec64dbe0eb3ecc Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 19:07:45 +1200 Subject: [PATCH 70/87] Update global_const.rs --- hypervisor/src/global_const.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 72c674f..fe4f411 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -4,7 +4,7 @@ use uefi::table::boot::MemoryType; pub const HEAP_SIZE: usize = 0x180000; /// The size of the stack in bytes. -pub const STACK_NUMBER_OF_PAGES: usize = 0x1000; +pub const STACK_NUMBER_OF_PAGES: usize = 0x300; /// The memory type for the stack allocated pages pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; From 7e8738a6b50b7d7226be379f75eef0c8cc52801d Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 16 Jun 2024 19:13:01 +1200 Subject: [PATCH 71/87] Update kernel.rs --- hypervisor/src/windows/kernel.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hypervisor/src/windows/kernel.rs b/hypervisor/src/windows/kernel.rs index a54b4ac..78b6773 100644 --- a/hypervisor/src/windows/kernel.rs +++ b/hypervisor/src/windows/kernel.rs @@ -112,7 +112,7 @@ impl KernelHook { /// * `Ok(())` - The hook was installed successfully. /// * `Err(HypervisorError)` - If the hook installation fails. 
pub fn enable_kernel_ept_hook(&mut self, vm: &mut Vm, function_hash: u32, ept_hook_type: EptHookType) -> Result<(), HypervisorError> { - debug!("Setting up EPT hook for function: {}", function_hash); + debug!("Setting up EPT hook for function: {:#x}", function_hash); let function_va = unsafe { get_export_by_hash(self.ntoskrnl_base_pa as _, self.ntoskrnl_base_va as _, function_hash) @@ -138,7 +138,7 @@ impl KernelHook { /// * `Ok(())` - The hook was removed successfully. /// * `Err(HypervisorError)` - If the hook removal fails. pub fn disable_kernel_ept_hook(&mut self, vm: &mut Vm, function_hash: u32, ept_hook_type: EptHookType) -> Result<(), HypervisorError> { - debug!("Disabling EPT hook for function: {}", function_hash); + debug!("Disabling EPT hook for function: {:#x}", function_hash); let function_va = unsafe { get_export_by_hash(self.ntoskrnl_base_pa as _, self.ntoskrnl_base_va as _, function_hash) @@ -170,7 +170,7 @@ impl KernelHook { syscall_number: u16, ept_hook_type: EptHookType, ) -> Result<(), HypervisorError> { - debug!("Setting up EPT hook for syscall: {}", syscall_number); + debug!("Setting up EPT hook for syscall: {:#x}", syscall_number); let ssdt = SsdtHook::find_ssdt_function_address(syscall_number as _, false, self.ntoskrnl_base_pa as _, self.ntoskrnl_size as _) .or_else(|_| SsdtHook::find_ssdt_function_address(syscall_number as _, true, self.ntoskrnl_base_pa as _, self.ntoskrnl_size as _)) @@ -196,7 +196,7 @@ impl KernelHook { /// * `Ok(())` - The hook was removed successfully. /// * `Err(HypervisorError)` - If the hook removal fails. 
pub fn disable_syscall_ept_hook(&mut self, vm: &mut Vm, syscall_number: u16, ept_hook_type: EptHookType) -> Result<(), HypervisorError> { - debug!("Disabling EPT hook for syscall: {}", syscall_number); + debug!("Disabling EPT hook for syscall: {:#x}", syscall_number); let ssdt = SsdtHook::find_ssdt_function_address(syscall_number as _, false, self.ntoskrnl_base_pa as _, self.ntoskrnl_size as _) .or_else(|_| SsdtHook::find_ssdt_function_address(syscall_number as _, true, self.ntoskrnl_base_pa as _, self.ntoskrnl_size as _)) From 402bd9e6ecb74e51cafaf8da67f1309e9ad36399 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 17 Jun 2024 17:45:08 +1200 Subject: [PATCH 72/87] No need to pass box to setup_host_registers_state --- hypervisor/src/intel/vm.rs | 3 ++- hypervisor/src/intel/vmcs.rs | 5 +---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index d27b6bd..a5d1eaa 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -215,9 +215,10 @@ impl Vm { let primary_eptp = self.primary_eptp; let msr_bitmap = &self.msr_bitmap as *const _ as u64; + let pml4_pa = self.host_paging.get_pml4_pa()?; Vmcs::setup_guest_registers_state(&self.guest_descriptor, &self.guest_registers); - Vmcs::setup_host_registers_state(&self.host_descriptor, &self.host_paging)?; + Vmcs::setup_host_registers_state(&self.host_descriptor, pml4_pa)?; Vmcs::setup_vmcs_control_fields(primary_eptp, msr_bitmap)?; trace!("VMCS setup successfully!"); diff --git a/hypervisor/src/intel/vmcs.rs b/hypervisor/src/intel/vmcs.rs index 65b4e59..bfc4838 100644 --- a/hypervisor/src/intel/vmcs.rs +++ b/hypervisor/src/intel/vmcs.rs @@ -13,7 +13,6 @@ use { descriptor::Descriptors, invept::invept_single_context, invvpid::{invvpid_single_context, VPID_TAG}, - paging::PageTables, segmentation::{access_rights_from_native, lar, lsl}, support::{cr0, cr3, rdmsr, sidt, vmread, vmwrite}, }, @@ -132,11 
+131,9 @@ impl Vmcs { /// # Arguments /// * `host_descriptor` - Descriptor tables for the host. /// * `host_paging` - Paging tables for the host. - pub fn setup_host_registers_state(host_descriptor: &Descriptors, host_paging: &PageTables) -> Result<(), HypervisorError> { + pub fn setup_host_registers_state(host_descriptor: &Descriptors, pml4_pa: u64) -> Result<(), HypervisorError> { log::debug!("Setting up Host Registers State"); - let pml4_pa = host_paging.get_pml4_pa()?; - vmwrite(vmcs::host::CR0, cr0().bits() as u64); vmwrite(vmcs::host::CR3, pml4_pa); vmwrite(vmcs::host::CR4, Cr4::read_raw()); From 8c4ee934647cb53543981c8cabc3da28149407e7 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 17 Jun 2024 17:56:48 +1200 Subject: [PATCH 73/87] removed global_const.rs --- hypervisor/src/allocator.rs | 17 +++++++++++------ hypervisor/src/global_const.rs | 10 ---------- hypervisor/src/lib.rs | 1 - 3 files changed, 11 insertions(+), 17 deletions(-) delete mode 100644 hypervisor/src/global_const.rs diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 8808317..5ea064d 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -4,7 +4,6 @@ //! debugging information. use { - crate::global_const::{HEAP_SIZE, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES}, alloc::{boxed::Box, vec::Vec}, core::{ alloc::{GlobalAlloc, Layout}, @@ -13,13 +12,16 @@ use { }, log::debug, spin::Mutex, - uefi::table::{boot::AllocateType, Boot, SystemTable}, + uefi::table::{ + boot::{AllocateType, MemoryType}, + Boot, SystemTable, + }, x86::bits64::paging::BASE_PAGE_SIZE, }; /// Global allocator instance with a heap size of `HEAP_SIZE`. #[global_allocator] -pub static mut HEAP: ListHeap = ListHeap::new(); +pub static mut HEAP: ListHeap<0x980000> = ListHeap::new(); /// A heap allocator based on a linked list of free chunks. 
/// @@ -327,7 +329,7 @@ unsafe impl GlobalAlloc for ListHeap { /// /// Panics if memory allocation fails. pub unsafe fn box_zeroed() -> Box { - unsafe { Box::::new_zeroed().assume_init() } + Box::::new_zeroed().assume_init() } /// Reference to the system table, used to call the boot services pool memory @@ -371,17 +373,20 @@ pub unsafe fn initialize_system_table_and_heap(system_table: &SystemTable) pub fn allocate_host_stack() -> *mut u8 { let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety + let memory_type = MemoryType::RUNTIME_SERVICES_DATA; + let number_of_pages = 0x300; + // Get the system table and boot services let system_table = SYSTEM_TABLE.load(Ordering::Acquire); let boot_services = unsafe { &(*system_table).boot_services() }; // Allocate the pages using UEFI's allocate_pages function let allocated_pages = boot_services - .allocate_pages(AllocateType::AnyPages, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES) + .allocate_pages(AllocateType::AnyPages, memory_type, number_of_pages) .expect("Failed to allocate UEFI pages"); // Record the allocation - record_allocation(allocated_pages as usize, STACK_NUMBER_OF_PAGES * BASE_PAGE_SIZE); // Assuming 4KB pages + record_allocation(allocated_pages as usize, number_of_pages * BASE_PAGE_SIZE); // Assuming 4KB pages // Return the pointer to the allocated memory block allocated_pages as *mut u8 diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs deleted file mode 100644 index fe4f411..0000000 --- a/hypervisor/src/global_const.rs +++ /dev/null @@ -1,10 +0,0 @@ -use uefi::table::boot::MemoryType; - -/// The size of the heap in bytes. -pub const HEAP_SIZE: usize = 0x180000; - -/// The size of the stack in bytes. 
-pub const STACK_NUMBER_OF_PAGES: usize = 0x300; - -/// The memory type for the stack allocated pages -pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs index 5c5cbc9..e632d3e 100644 --- a/hypervisor/src/lib.rs +++ b/hypervisor/src/lib.rs @@ -15,7 +15,6 @@ extern crate static_assertions; pub mod allocator; pub mod error; -pub mod global_const; pub mod intel; pub mod logger; pub mod vmm; From 12acebc18a312dfd34155b95861d666e1671ed2b Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 17 Jun 2024 18:16:00 +1200 Subject: [PATCH 74/87] Revert "removed global_const.rs" This reverts commit 8c4ee934647cb53543981c8cabc3da28149407e7. --- hypervisor/src/allocator.rs | 17 ++++++----------- hypervisor/src/global_const.rs | 10 ++++++++++ hypervisor/src/lib.rs | 1 + 3 files changed, 17 insertions(+), 11 deletions(-) create mode 100644 hypervisor/src/global_const.rs diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 5ea064d..8808317 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -4,6 +4,7 @@ //! debugging information. use { + crate::global_const::{HEAP_SIZE, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES}, alloc::{boxed::Box, vec::Vec}, core::{ alloc::{GlobalAlloc, Layout}, @@ -12,16 +13,13 @@ use { }, log::debug, spin::Mutex, - uefi::table::{ - boot::{AllocateType, MemoryType}, - Boot, SystemTable, - }, + uefi::table::{boot::AllocateType, Boot, SystemTable}, x86::bits64::paging::BASE_PAGE_SIZE, }; /// Global allocator instance with a heap size of `HEAP_SIZE`. #[global_allocator] -pub static mut HEAP: ListHeap<0x980000> = ListHeap::new(); +pub static mut HEAP: ListHeap = ListHeap::new(); /// A heap allocator based on a linked list of free chunks. /// @@ -329,7 +327,7 @@ unsafe impl GlobalAlloc for ListHeap { /// /// Panics if memory allocation fails. 
pub unsafe fn box_zeroed() -> Box { - Box::::new_zeroed().assume_init() + unsafe { Box::::new_zeroed().assume_init() } } /// Reference to the system table, used to call the boot services pool memory @@ -373,20 +371,17 @@ pub unsafe fn initialize_system_table_and_heap(system_table: &SystemTable) pub fn allocate_host_stack() -> *mut u8 { let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety - let memory_type = MemoryType::RUNTIME_SERVICES_DATA; - let number_of_pages = 0x300; - // Get the system table and boot services let system_table = SYSTEM_TABLE.load(Ordering::Acquire); let boot_services = unsafe { &(*system_table).boot_services() }; // Allocate the pages using UEFI's allocate_pages function let allocated_pages = boot_services - .allocate_pages(AllocateType::AnyPages, memory_type, number_of_pages) + .allocate_pages(AllocateType::AnyPages, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES) .expect("Failed to allocate UEFI pages"); // Record the allocation - record_allocation(allocated_pages as usize, number_of_pages * BASE_PAGE_SIZE); // Assuming 4KB pages + record_allocation(allocated_pages as usize, STACK_NUMBER_OF_PAGES * BASE_PAGE_SIZE); // Assuming 4KB pages // Return the pointer to the allocated memory block allocated_pages as *mut u8 diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs new file mode 100644 index 0000000..fe4f411 --- /dev/null +++ b/hypervisor/src/global_const.rs @@ -0,0 +1,10 @@ +use uefi::table::boot::MemoryType; + +/// The size of the heap in bytes. +pub const HEAP_SIZE: usize = 0x180000; + +/// The size of the stack in bytes. 
+pub const STACK_NUMBER_OF_PAGES: usize = 0x300; + +/// The memory type for the stack allocated pages +pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs index e632d3e..5c5cbc9 100644 --- a/hypervisor/src/lib.rs +++ b/hypervisor/src/lib.rs @@ -15,6 +15,7 @@ extern crate static_assertions; pub mod allocator; pub mod error; +pub mod global_const; pub mod intel; pub mod logger; pub mod vmm; From 2480418fa7440deff911c584d8b2e5a638c604fa Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 17 Jun 2024 19:10:32 +1200 Subject: [PATCH 75/87] Added comments and changed var names --- hypervisor/src/intel/hooks/memory_manager.rs | 39 ++++++++++++-------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/hypervisor/src/intel/hooks/memory_manager.rs b/hypervisor/src/intel/hooks/memory_manager.rs index f6f553d..8ec92cf 100644 --- a/hypervisor/src/intel/hooks/memory_manager.rs +++ b/hypervisor/src/intel/hooks/memory_manager.rs @@ -38,10 +38,10 @@ pub struct HookMapping { /// for a hypervisor, allocating memory as needed at runtime. #[derive(Debug, Clone)] pub struct MemoryManager { - /// Active mappings of guest physical addresses to their respective hook mappings. - active_mappings: BTreeMap, + /// Mappings of guest physical addresses to their respective hook mappings. + guest_page_mappings: BTreeMap, /// Mappings of large guest physical addresses to their respective page tables. - large_pt_mappings: BTreeMap>, + large_page_table_mappings: BTreeMap>, } impl MemoryManager { @@ -53,8 +53,8 @@ impl MemoryManager { trace!("Initializing memory manager"); Self { - active_mappings: BTreeMap::new(), - large_pt_mappings: BTreeMap::new(), + guest_page_mappings: BTreeMap::new(), + large_page_table_mappings: BTreeMap::new(), } } @@ -66,7 +66,7 @@ impl MemoryManager { /// # Returns /// `true` if the guest page is processed, otherwise `false`. 
pub fn is_guest_page_processed(&self, guest_page_pa: u64) -> bool { - self.active_mappings.contains_key(&guest_page_pa) + self.guest_page_mappings.contains_key(&guest_page_pa) } /// Maps a shadow page to a guest physical address and adds hook information, allocating memory as needed. @@ -97,20 +97,25 @@ impl MemoryManager { function_hash, }; - if let Some(mapping) = self.active_mappings.get_mut(&guest_page_pa) { + // Check if the guest page is already mapped + if let Some(mapping) = self.guest_page_mappings.get_mut(&guest_page_pa) { trace!("Mapping already exists, adding hook info"); + + // Check if the hook already exists for the given function PA if mapping.hooks.iter().any(|hook| hook.guest_function_pa == guest_function_pa) { trace!("Hook already exists for function PA: {:#x}", guest_function_pa); } else { - mapping.hooks.push(hook_info); + mapping.hooks.push(hook_info); // Add new hook info } } else { trace!("Mapping does not exist, creating new mapping"); + // Allocate a new shadow page let shadow_page = unsafe { box_zeroed::() }; let mut hooks = Vec::new(); hooks.push(hook_info); - self.active_mappings.insert(guest_page_pa, HookMapping { shadow_page, hooks }); + // Insert new mapping into guest_page_mappings + self.guest_page_mappings.insert(guest_page_pa, HookMapping { shadow_page, hooks }); trace!("Guest page mapped to shadow page successfully"); } @@ -125,10 +130,12 @@ impl MemoryManager { /// # Returns /// `Ok(())` if successful, or an error if no free page tables are available. 
pub fn map_large_page_to_pt(&mut self, guest_large_page_pa: u64) -> Result<(), HypervisorError> { - if !self.large_pt_mappings.contains_key(&guest_large_page_pa) { + // Check if the large page is already mapped + if !self.large_page_table_mappings.contains_key(&guest_large_page_pa) { trace!("Large page not mapped to page table, mapping now"); + // Allocate a new page table let pt = unsafe { box_zeroed::() }; - self.large_pt_mappings.insert(guest_large_page_pa, pt); + self.large_page_table_mappings.insert(guest_large_page_pa, pt); trace!("Large page mapped to page table successfully"); } else { trace!("Large page PA: {:#x} is already mapped to a page table", guest_large_page_pa); @@ -145,7 +152,7 @@ impl MemoryManager { /// # Returns /// An `Option` containing a mutable reference to the `Pt` if found. pub fn get_page_table_as_mut(&mut self, guest_large_page_pa: u64) -> Option<&mut Pt> { - self.large_pt_mappings.get_mut(&guest_large_page_pa).map(|pt| &mut **pt) + self.large_page_table_mappings.get_mut(&guest_large_page_pa).map(|pt| &mut **pt) } /// Retrieves a pointer to the shadow page associated with a guest physical address. @@ -156,7 +163,7 @@ impl MemoryManager { /// # Returns /// An `Option` containing the memory address of the `Page` as a `u64` if found. pub fn get_shadow_page_as_ptr(&self, guest_page_pa: u64) -> Option { - self.active_mappings + self.guest_page_mappings .get(&guest_page_pa) .map(|mapping| &*mapping.shadow_page as *const Page as u64) } @@ -169,7 +176,7 @@ impl MemoryManager { /// # Returns /// An `Option` containing a reference to the `HookInfo` if found. pub fn get_hook_info(&self, guest_page_pa: u64) -> Option<&Vec> { - self.active_mappings.get(&guest_page_pa).map(|mapping| &mapping.hooks) + self.guest_page_mappings.get(&guest_page_pa).map(|mapping| &mapping.hooks) } /// Retrieves a reference to the `HookInfo` instance associated with a guest function physical address. 
@@ -181,7 +188,7 @@ impl MemoryManager { /// # Returns /// An `Option` containing a reference to the `HookInfo` instance if found. pub fn get_hook_info_by_function_pa(&self, guest_page_pa: u64, guest_function_pa: u64) -> Option<&HookInfo> { - self.active_mappings + self.guest_page_mappings .get(&guest_page_pa)? .hooks .iter() @@ -197,7 +204,7 @@ impl MemoryManager { /// # Returns /// An `Option` containing a reference to the `HookInfo` instance if found. pub fn get_hook_info_by_function_va(&self, guest_page_pa: u64, guest_function_va: u64) -> Option<&HookInfo> { - self.active_mappings + self.guest_page_mappings .get(&guest_page_pa)? .hooks .iter() From 7bfe795df7d7bc9ded6bf455eeb74860375b5672 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 23 Jun 2024 18:41:11 +1200 Subject: [PATCH 76/87] HookManager shared as global with multi-cores - Once the stack issue is fixed inside Vm.rs. This will need testing again from single core and multi-core to avoid any issues. 
--- hypervisor/src/intel/hooks/hook_manager.rs | 205 +++++++++++++++++---- hypervisor/src/intel/vm.rs | 8 - hypervisor/src/intel/vmexit/commands.rs | 42 ++--- hypervisor/src/intel/vmexit/cpuid.rs | 83 +++++---- hypervisor/src/intel/vmexit/ept.rs | 25 +-- hypervisor/src/intel/vmexit/msr.rs | 12 +- hypervisor/src/intel/vmexit/mtf.rs | 22 ++- hypervisor/src/intel/vmexit/vmcall.rs | 15 +- hypervisor/src/windows/kernel.rs | 114 ------------ hypervisor/src/windows/mod.rs | 1 - uefi/Cargo.toml | 1 + uefi/src/setup.rs | 27 +-- 12 files changed, 269 insertions(+), 286 deletions(-) delete mode 100644 hypervisor/src/windows/kernel.rs diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index c924929..a4de50d 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -1,6 +1,6 @@ use { crate::{ - allocator::{print_tracked_allocations, ALLOCATED_MEMORY}, + allocator::{box_zeroed, print_tracked_allocations, ALLOCATED_MEMORY}, error::HypervisorError, intel::{ addresses::PhysicalAddress, @@ -11,21 +11,26 @@ use { }, invept::invept_all_contexts, invvpid::invvpid_all_contexts, + page::Page, vm::Vm, }, - windows::kernel::KernelHook, - }, - core::{ - intrinsics::copy_nonoverlapping, - sync::atomic::{AtomicU64, Ordering}, + windows::{ + nt::pe::{get_export_by_hash, get_image_base_address, get_size_of_image}, + ssdt::ssdt_hook::SsdtHook, + }, }, + alloc::{boxed::Box, sync::Arc}, + core::intrinsics::copy_nonoverlapping, + lazy_static::lazy_static, log::*, + spin::{Mutex, MutexGuard}, x86::bits64::paging::{PAddr, BASE_PAGE_SIZE}, }; -/// Global variable to store the address of the created dummy page. -/// This variable can be accessed by multiple cores/threads/processors. -pub static DUMMY_PAGE_ADDRESS: AtomicU64 = AtomicU64::new(0); +lazy_static! { + /// Global instance of HookManager wrapped in a Mutex for thread-safe access. 
+ pub static ref GLOBAL_HOOK_MANAGER: Arc> = Arc::new(Mutex::new(HookManager::new().expect("Failed to create HookManager instance"))); +} /// Enum representing different types of hooks that can be applied. #[derive(Debug, Clone, Copy)] @@ -46,8 +51,14 @@ pub struct HookManager { /// The memory manager instance for the pre-allocated shadow pages and page tables. pub memory_manager: MemoryManager, - /// The hook instance for the Windows kernel, storing the VA and PA of ntoskrnl.exe. This is retrieved from the first LSTAR_MSR write operation, intercepted by the hypervisor. - pub kernel_hook: Option, + /// The base address of ntoskrnl.exe. + pub ntoskrnl_base_va: u64, + + /// The physical address of ntoskrnl.exe. + pub ntoskrnl_base_pa: u64, + + /// The size of ntoskrnl.exe. + pub ntoskrnl_size: u64, /// A flag indicating whether the CPUID cache information has been called. This will be used to perform hooks at boot time when SSDT has been initialized. /// KiSetCacheInformation -> KiSetCacheInformationIntel -> KiSetStandardizedCacheInformation -> __cpuid(4, 0) @@ -59,32 +70,57 @@ pub struct HookManager { /// The number of times the MTF (Monitor Trap Flag) should be triggered before disabling it for restoring overwritten instructions. pub mtf_counter: Option, + + pub dummy_page: Box, } impl HookManager { /// Creates a new instance of `HookManager`. /// - /// # Arguments - /// - /// * `primary_ept_pre_alloc_pts` - A mutable reference to a vector of pre-allocated page tables. - /// /// # Returns /// A result containing a boxed `HookManager` instance or an error of type `HypervisorError`. 
pub fn new() -> Result { trace!("Initializing hook manager"); let memory_manager = MemoryManager::new(); - let kernel_hook = Some(KernelHook::new()?); + let dummy_page = HookManager::create_dummy_page(0xff); Ok(Self { memory_manager, has_cpuid_cache_info_been_called: false, - kernel_hook, + ntoskrnl_base_va: 0, + ntoskrnl_base_pa: 0, + ntoskrnl_size: 0, old_rflags: None, mtf_counter: None, + dummy_page, }) } + /// Returns a reference to the global HookManager instance. + pub fn get_hook_manager_ref() -> Arc> { + Arc::clone(&GLOBAL_HOOK_MANAGER) + } + + /// Locks and returns a mutable reference to the global HookManager instance. + pub fn get_hook_manager_mut() -> MutexGuard<'static, HookManager> { + GLOBAL_HOOK_MANAGER.lock() + } + + /// Creates a dummy page filled with a specific byte value. + /// + /// This function allocates a page of memory and fills it with a specified byte value. + /// The address of the dummy page is stored in a global variable for access by multiple cores/threads/processors. + /// + /// # Arguments + /// + /// * `fill_byte` - The byte value to fill the page with. + pub fn create_dummy_page(fill_byte: u8) -> Box { + let mut dummy_page = unsafe { box_zeroed::() }; + dummy_page.0.iter_mut().for_each(|byte| *byte = fill_byte); + dummy_page + } + /// Hides the hypervisor memory from the guest by installing EPT hooks on all allocated memory regions. /// /// This function iterates through the recorded memory allocations and calls `ept_hide_hypervisor_memory` @@ -99,7 +135,7 @@ impl HookManager { /// # Returns /// /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise. - pub fn hide_hypervisor_memory(vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { + pub fn hide_hypervisor_memory(hook_manager: &mut HookManager, vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { // Print the tracked memory allocations for debugging purposes. 
print_tracked_allocations(); @@ -110,7 +146,12 @@ impl HookManager { for range in allocated_memory.iter() { for offset in (0..range.size).step_by(BASE_PAGE_SIZE) { let guest_page_pa = range.start + offset; - HookManager::ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?; + HookManager::ept_hide_hypervisor_memory( + hook_manager, + vm, + PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), + page_permissions, + )?; } } @@ -124,28 +165,32 @@ impl HookManager { /// # Arguments /// /// * `vm` - The virtual machine instance of the hypervisor. + /// * `guest_page_pa` - The physical address of the guest page. /// * `page_permissions` - The desired permissions for the hooked page. /// /// # Returns /// /// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise. - fn ept_hide_hypervisor_memory(vm: &mut Vm, guest_page_pa: u64, page_permissions: AccessType) -> Result<(), HypervisorError> { + fn ept_hide_hypervisor_memory( + hook_manager: &mut HookManager, + vm: &mut Vm, + guest_page_pa: u64, + page_permissions: AccessType, + ) -> Result<(), HypervisorError> { let guest_page_pa = PAddr::from(guest_page_pa).align_down_to_base_page(); trace!("Guest page PA: {:#x}", guest_page_pa.as_u64()); let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); - let dummy_page_pa = DUMMY_PAGE_ADDRESS.load(Ordering::SeqCst); - + let dummy_page_pa = hook_manager.dummy_page.0.as_mut_ptr() as u64; trace!("Dummy page PA: {:#x}", dummy_page_pa); trace!("Mapping large page"); // Map the large page to the pre-allocated page table, if it hasn't been mapped already. 
- vm.hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; + hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; - let pre_alloc_pt = vm - .hook_manager + let pre_alloc_pt = hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -199,7 +244,13 @@ impl HookManager { /// # Returns /// /// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise. - pub fn ept_hook_function(vm: &mut Vm, guest_function_va: u64, function_hash: u32, ept_hook_type: EptHookType) -> Result<(), HypervisorError> { + pub fn ept_hook_function( + hook_manager: &mut HookManager, + vm: &mut Vm, + guest_function_va: u64, + function_hash: u32, + ept_hook_type: EptHookType, + ) -> Result<(), HypervisorError> { debug!("Creating EPT hook for function at VA: {:#x}", guest_function_va); let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va)); @@ -214,14 +265,13 @@ impl HookManager { // 1. Map the large page to the pre-allocated page table, if it hasn't been mapped already. // We must map the large page to the pre-allocated page table before accessing it. debug!("Mapping large page"); - vm.hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; + hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; // 2. Check if the large page has already been split. If not, split it into 4KB pages. debug!("Checking if large page has already been split"); if vm.primary_ept.is_large_page(guest_page_pa.as_u64()) { // We must map the large page to the pre-allocated page table before accessing it. - let pre_alloc_pt = vm - .hook_manager + let pre_alloc_pt = hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -232,10 +282,10 @@ impl HookManager { // 3. Check if the guest page is already processed. 
If not, map the guest page to the shadow page. // Ensure the memory manager maintains a set of processed guest pages to track this mapping. - if !vm.hook_manager.memory_manager.is_guest_page_processed(guest_page_pa.as_u64()) { + if !hook_manager.memory_manager.is_guest_page_processed(guest_page_pa.as_u64()) { // We must map the guest page to the shadow page before accessing it. debug!("Mapping guest page and shadow page"); - vm.hook_manager.memory_manager.map_guest_to_shadow_page( + hook_manager.memory_manager.map_guest_to_shadow_page( guest_page_pa.as_u64(), guest_function_va, guest_function_pa.as_u64(), @@ -245,7 +295,7 @@ impl HookManager { // We must map the guest page to the shadow page before accessing it. let shadow_page_pa = PAddr::from( - vm.hook_manager + hook_manager .memory_manager .get_shadow_page_as_ptr(guest_page_pa.as_u64()) .ok_or(HypervisorError::ShadowPageNotFound)?, @@ -253,12 +303,13 @@ impl HookManager { // 4. Copy the guest page to the shadow page if it hasn't been copied already, ensuring the shadow page contains the original function code. debug!("Copying guest page to shadow page: {:#x}", guest_page_pa.as_u64()); - Self::unsafe_copy_guest_to_shadow(guest_page_pa, shadow_page_pa); + HookManager::unsafe_copy_guest_to_shadow(guest_page_pa, shadow_page_pa); // 5. Install the inline hook at the shadow function address if the hook type is `Function`. 
match ept_hook_type { EptHookType::Function(inline_hook_type) => { - let shadow_function_pa = PAddr::from(Self::calculate_function_offset_in_host_shadow_page(shadow_page_pa, guest_function_pa)); + let shadow_function_pa = + PAddr::from(HookManager::calculate_function_offset_in_host_shadow_page(shadow_page_pa, guest_function_pa)); debug!("Shadow Function PA: {:#x}", shadow_function_pa); debug!("Installing inline hook at shadow function PA: {:#x}", shadow_function_pa.as_u64()); @@ -269,8 +320,7 @@ impl HookManager { } } - let pre_alloc_pt = vm - .hook_manager + let pre_alloc_pt = hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -303,7 +353,12 @@ impl HookManager { /// # Returns /// /// * Returns `Ok(())` if the hook was successfully removed, `Err(HypervisorError)` otherwise. - pub fn ept_unhook_function(vm: &mut Vm, guest_function_va: u64, _ept_hook_type: EptHookType) -> Result<(), HypervisorError> { + pub fn ept_unhook_function( + hook_manager: &mut HookManager, + vm: &mut Vm, + guest_function_va: u64, + _ept_hook_type: EptHookType, + ) -> Result<(), HypervisorError> { debug!("Removing EPT hook for function at VA: {:#x}", guest_function_va); let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va)); @@ -315,8 +370,7 @@ impl HookManager { let guest_large_page_pa = guest_function_pa.align_down_to_large_page(); debug!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); - let pre_alloc_pt = vm - .hook_manager + let pre_alloc_pt = hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -415,4 +469,77 @@ impl HookManager { instruction_count } + + /// Sets the base address and size of the Windows kernel. + /// + /// # Arguments + /// + /// * `guest_va` - The virtual address of the guest. + /// + /// # Returns + /// + /// * `Ok(())` - The kernel base and size were set successfully. 
+ pub fn set_kernel_base_and_size(&mut self, guest_va: u64) -> Result<(), HypervisorError> { + // Get the base address of ntoskrnl.exe. + self.ntoskrnl_base_va = unsafe { get_image_base_address(guest_va).ok_or(HypervisorError::FailedToGetImageBaseAddress)? }; + + // Get the physical address of ntoskrnl.exe using GUEST_CR3 and the virtual address. + self.ntoskrnl_base_pa = PhysicalAddress::pa_from_va(self.ntoskrnl_base_va); + + // Get the size of ntoskrnl.exe. + self.ntoskrnl_size = unsafe { get_size_of_image(self.ntoskrnl_base_pa as _).ok_or(HypervisorError::FailedToGetKernelSize)? } as u64; + + Ok(()) + } + + /// Manages an EPT hook for a kernel function, enabling or disabling it. + /// + /// # Arguments + /// + /// * `vm` - The virtual machine to install/remove the hook on. + /// * `function_hash` - The hash of the function to hook/unhook. + /// * `syscall_number` - The syscall number to use if `get_export_by_hash` fails. + /// * `ept_hook_type` - The type of EPT hook to use. + /// * `enable` - A boolean indicating whether to enable (true) or disable (false) the hook. + /// + /// # Returns + /// + /// * `Ok(())` - The hook was managed successfully. + /// * `Err(HypervisorError)` - If the hook management fails. 
+ pub fn manage_kernel_ept_hook( + hook_manager: &mut HookManager, + vm: &mut Vm, + function_hash: u32, + syscall_number: u16, + ept_hook_type: EptHookType, + enable: bool, + ) -> Result<(), HypervisorError> { + let action = if enable { "Enabling" } else { "Disabling" }; + debug!("{} EPT hook for function: {}", action, function_hash); + + let function_va = unsafe { + if let Some(va) = get_export_by_hash(hook_manager.ntoskrnl_base_pa as _, hook_manager.ntoskrnl_base_va as _, function_hash) { + va + } else { + let ssdt_function_address = SsdtHook::find_ssdt_function_address( + syscall_number as _, + false, + hook_manager.ntoskrnl_base_pa as _, + hook_manager.ntoskrnl_size as _, + ); + match ssdt_function_address { + Ok(ssdt_hook) => ssdt_hook.guest_function_va as *mut u8, + Err(_) => return Err(HypervisorError::FailedToGetExport), + } + } + }; + + if enable { + HookManager::ept_hook_function(hook_manager, vm, function_va as _, function_hash, ept_hook_type)?; + } else { + HookManager::ept_unhook_function(hook_manager, vm, function_va as _, ept_hook_type)?; + } + + Ok(()) + } } diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index a5d1eaa..f782ef5 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -13,7 +13,6 @@ use { capture::GuestRegisters, descriptor::Descriptors, ept::Ept, - hooks::hook_manager::HookManager, paging::PageTables, support::{vmclear, vmptrld, vmread, vmxon}, vmcs::Vmcs, @@ -47,9 +46,6 @@ pub struct Vm { /// Paging tables for the host. pub host_paging: PageTables, - /// The hook manager for the VM. - pub hook_manager: HookManager, - /// A bitmap for handling MSRs. 
pub msr_bitmap: MsrBitmap, @@ -110,16 +106,12 @@ impl Vm { trace!("Modifying MSR interception for LSTAR MSR write access"); msr_bitmap.modify_msr_interception(msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook); - trace!("Creating EPT hook manager"); - let hook_manager = HookManager::new()?; - trace!("VM created"); Ok(Self { vmxon_region, vmcs_region, host_paging, - hook_manager, host_descriptor: Descriptors::new_for_host(), guest_descriptor: Descriptors::new_from_current(), msr_bitmap, diff --git a/hypervisor/src/intel/vmexit/commands.rs b/hypervisor/src/intel/vmexit/commands.rs index 8d3aa7c..1571f73 100644 --- a/hypervisor/src/intel/vmexit/commands.rs +++ b/hypervisor/src/intel/vmexit/commands.rs @@ -1,7 +1,10 @@ use { crate::intel::{ addresses::PhysicalAddress, - hooks::{hook_manager::EptHookType, inline::InlineHookType}, + hooks::{ + hook_manager::{EptHookType, HookManager}, + inline::InlineHookType, + }, vm::Vm, }, log::*, @@ -35,29 +38,26 @@ pub fn handle_guest_commands(vm: &mut Vm) -> bool { let result = match client_data.command { Commands::EnableKernelEptHook | Commands::DisableKernelEptHook => { let enable = client_data.command == Commands::EnableKernelEptHook; - if let Some(mut kernel_hook) = vm.hook_manager.kernel_hook.take() { - let result = kernel_hook.manage_kernel_ept_hook( - vm, - client_data.function_hash, - client_data.syscall_number, - EptHookType::Function(InlineHookType::Vmcall), - enable, - ); - // Put the kernel hook back in the box - vm.hook_manager.kernel_hook = Some(kernel_hook); + // Lock the global HookManager once + let mut hook_manager = HookManager::get_hook_manager_mut(); - match result { - Ok(_) => true, - Err(e) => { - let action = if enable { "setup" } else { "disable" }; - error!("Failed to {} kernel EPT hook: {:?}", action, e); - false - } + let result = HookManager::manage_kernel_ept_hook( + &mut hook_manager, + vm, + client_data.function_hash, + client_data.syscall_number, + 
EptHookType::Function(InlineHookType::Vmcall), + enable, + ); + + match result { + Ok(_) => true, + Err(e) => { + let action = if enable { "setup" } else { "disable" }; + error!("Failed to {} kernel EPT hook: {:?}", action, e); + false } - } else { - error!("KernelHook is missing"); - false } } Commands::Invalid => { diff --git a/hypervisor/src/intel/vmexit/cpuid.rs b/hypervisor/src/intel/vmexit/cpuid.rs index 185cb09..4b1d4a1 100644 --- a/hypervisor/src/intel/vmexit/cpuid.rs +++ b/hypervisor/src/intel/vmexit/cpuid.rs @@ -5,6 +5,7 @@ use { crate::{ error::HypervisorError, intel::{ + hooks::hook_manager::HookManager, vm::Vm, vmexit::{commands::handle_guest_commands, ExitType}, }, @@ -81,7 +82,7 @@ const PASSWORD: u64 = 0xDEADBEEF; /// /// # Arguments /// -/// * `registers` - A mutable reference to the guest's current register state. +/// * `vm` - A mutable reference to the virtual machine (VM) instance. /// /// # Returns /// @@ -91,8 +92,6 @@ const PASSWORD: u64 = 0xDEADBEEF; pub fn handle_cpuid(vm: &mut Vm) -> Result { trace!("Handling CPUID VM exit..."); - // const HYPERV_CPUID_LEAF_RANGE: RangeInclusive = 0x40000000..=0x4FFFFFFF; - let leaf = vm.guest_registers.rax as u32; let sub_leaf = vm.guest_registers.rcx as u32; @@ -125,46 +124,46 @@ pub fn handle_cpuid(vm: &mut Vm) -> Result { } leaf if leaf == CpuidLeaf::CacheInformation as u32 => { trace!("CPUID leaf 0x2 detected (Cache Information)."); - if vm.hook_manager.has_cpuid_cache_info_been_called == false { - /* + + // Lock the global HookManager once + let mut hook_manager = HookManager::get_hook_manager_mut(); + + if !hook_manager.has_cpuid_cache_info_been_called { // Test UEFI boot-time hooks - if let Some(mut kernel_hook) = vm.hook_manager.kernel_hook.take() { - kernel_hook.manage_kernel_ept_hook( - vm, - crate::windows::nt::pe::djb2_hash("NtQuerySystemInformation".as_bytes()), - 0x0036, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, 
- )?; - kernel_hook.manage_kernel_ept_hook( - vm, - crate::windows::nt::pe::djb2_hash("NtCreateFile".as_bytes()), - 0x0055, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, - )?; - kernel_hook.manage_kernel_ept_hook( - vm, - crate::windows::nt::pe::djb2_hash("NtAllocateVirtualMemory".as_bytes()), - 0x18, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, - )?; - kernel_hook.manage_kernel_ept_hook( - vm, - crate::windows::nt::pe::djb2_hash("NtQueryInformationProcess".as_bytes()), - 0x19, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, - )?; - // Place the kernel hook back in the box - vm.hook_manager.kernel_hook = Some(kernel_hook); - // Set the flag - vm.hook_manager.has_cpuid_cache_info_been_called = true; - } else { - return Err(HypervisorError::KernelHookMissing); - } - */ + HookManager::manage_kernel_ept_hook( + &mut hook_manager, + vm, + crate::windows::nt::pe::djb2_hash("NtQuerySystemInformation".as_bytes()), + 0x0036, + crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + HookManager::manage_kernel_ept_hook( + &mut hook_manager, + vm, + crate::windows::nt::pe::djb2_hash("NtCreateFile".as_bytes()), + 0x0055, + crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + HookManager::manage_kernel_ept_hook( + &mut hook_manager, + vm, + crate::windows::nt::pe::djb2_hash("NtAllocateVirtualMemory".as_bytes()), + 0x18, + crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + HookManager::manage_kernel_ept_hook( + &mut hook_manager, + vm, + crate::windows::nt::pe::djb2_hash("NtQueryInformationProcess".as_bytes()), + 0x19, + 
crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + // Set the flag + hook_manager.has_cpuid_cache_info_been_called = true; } } leaf if leaf == CpuidLeaf::ExtendedFeatureInformation as u32 => { diff --git a/hypervisor/src/intel/vmexit/ept.rs b/hypervisor/src/intel/vmexit/ept.rs index 9b74e3e..01cfe49 100644 --- a/hypervisor/src/intel/vmexit/ept.rs +++ b/hypervisor/src/intel/vmexit/ept.rs @@ -3,6 +3,7 @@ use { error::HypervisorError, intel::{ ept::AccessType, + hooks::hook_manager::HookManager, support::vmread, vm::Vm, vmerror::EptViolationExitQualification, @@ -31,22 +32,23 @@ pub fn handle_ept_violation(vm: &mut Vm) -> Result { let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Faulting Guest Large Page PA: {:#x}", guest_large_page_pa); + let mut hook_manager = HookManager::get_hook_manager_mut(); + + // dump_primary_ept_entries(vm, guest_pa, &mut hook_manager)?; + let shadow_page_pa = PAddr::from( - vm.hook_manager + hook_manager .memory_manager .get_shadow_page_as_ptr(guest_page_pa.as_u64()) .ok_or(HypervisorError::ShadowPageNotFound)?, ); trace!("Shadow Page PA: {:#x}", shadow_page_pa.as_u64()); - let pre_alloc_pt = vm - .hook_manager + let pre_alloc_pt = hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; - // dump_primary_ept_entries(vm, guest_pa)?; - let exit_qualification_value = vmread(vmcs::ro::EXIT_QUALIFICATION); let ept_violation_qualification = EptViolationExitQualification::from_exit_qualification(exit_qualification_value); trace!("Exit Qualification for EPT Violations: {:#?}", ept_violation_qualification); @@ -72,14 +74,14 @@ pub fn handle_ept_violation(vm: &mut Vm) -> Result { // We make this read-write-execute to allow the instruction performing a read-write // operation and then switch back to execute-only shadow page from handle_mtf vmexit - vm.hook_manager.mtf_counter = 
Some(1); + hook_manager.mtf_counter = Some(1); // Set the monitor trap flag and initialize counter to the number of overwritten instructions set_monitor_trap_flag(true); // Ensure all data mutations to vm are done before calling this. // This function will update the guest interrupt flag to prevent interrupts while single-stepping - update_guest_interrupt_flag(vm, false)?; + update_guest_interrupt_flag(vm, &mut hook_manager, false)?; } trace!("EPT Violation handled successfully!"); @@ -111,8 +113,10 @@ pub fn handle_ept_misconfiguration(vm: &mut Vm) -> Result Result Result<(), HypervisorError> { +pub fn dump_primary_ept_entries(vm: &mut Vm, faulting_guest_pa: u64, hook_manager: &mut HookManager) -> Result<(), HypervisorError> { // Log the critical error information. trace!("Faulting guest address: {:#x}", faulting_guest_pa); @@ -147,8 +151,7 @@ pub fn dump_primary_ept_entries(vm: &mut Vm, faulting_guest_pa: u64) -> Result<( // Get the primary EPTs. let primary_ept = &mut vm.primary_ept; - let pre_alloc_pt = vm - .hook_manager + let pre_alloc_pt = hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; diff --git a/hypervisor/src/intel/vmexit/msr.rs b/hypervisor/src/intel/vmexit/msr.rs index 7edf8bd..3ad28bd 100644 --- a/hypervisor/src/intel/vmexit/msr.rs +++ b/hypervisor/src/intel/vmexit/msr.rs @@ -13,6 +13,7 @@ use { intel::{ bitmap::{MsrAccessType, MsrOperation}, events::EventInjection, + hooks::hook_manager::HookManager, support::{rdmsr, wrmsr}, vm::Vm, vmexit::ExitType, @@ -103,15 +104,8 @@ pub fn handle_msr_access(vm: &mut Vm, access_type: MsrAccessType) -> Result Result { trace!("Handling Monitor Trap Flag exit."); - if let Some(counter) = vm.hook_manager.mtf_counter.as_mut() { + let mut hook_manager = HookManager::get_hook_manager_mut(); + + if let Some(counter) = hook_manager.mtf_counter.as_mut() { trace!("Guest RIP: {:#x}", vm.guest_registers.rip); trace!("MTF counter before decrement: 
{}", *counter); *counter = counter.saturating_sub(1); // Safely decrement the counter @@ -47,15 +50,14 @@ pub fn handle_monitor_trap_flag(vm: &mut Vm) -> Result Result`: Ok if successful, Err if an error occurred during VMCS read/write operations. -pub fn update_guest_interrupt_flag(vm: &mut Vm, enable: bool) -> Result<(), HypervisorError> { +pub fn update_guest_interrupt_flag(vm: &mut Vm, hook_manager: &mut HookManager, enable: bool) -> Result<(), HypervisorError> { trace!("Updating guest interrupt flag..."); // Retrieve the current RFLAGS from the VMCS guest state area @@ -115,7 +118,7 @@ pub fn update_guest_interrupt_flag(vm: &mut Vm, enable: bool) -> Result<(), Hype trace!("Current guest RFLAGS before update: {:#x}", current_rflags_bits); // Optionally save the current RFLAGS to old_rflags before modification - vm.hook_manager.old_rflags = Some(current_rflags_bits); + hook_manager.old_rflags = Some(current_rflags_bits); // Set or clear the Interrupt Flag based on the 'enable' parameter if enable { @@ -138,11 +141,12 @@ pub fn update_guest_interrupt_flag(vm: &mut Vm, enable: bool) -> Result<(), Hype /// /// # Parameters /// * `vm`: A mutable reference to the virtual machine instance. +/// * `hook_manager`: A mutable reference to the hook manager instance. /// /// # Returns /// * `Result<(), HypervisorError>`: Ok if successful, Err if an error occurred during VMCS read/write operations. 
-pub fn restore_guest_interrupt_flag(vm: &mut Vm) -> Result<(), HypervisorError> { - if let Some(old_rflags_bits) = vm.hook_manager.old_rflags { +pub fn restore_guest_interrupt_flag(vm: &mut Vm, hook_manager: &mut HookManager) -> Result<(), HypervisorError> { + if let Some(old_rflags_bits) = hook_manager.old_rflags { trace!("Restoring guest RFLAGS to old value: {:#x}", old_rflags_bits); // Update VM register state first diff --git a/hypervisor/src/intel/vmexit/vmcall.rs b/hypervisor/src/intel/vmexit/vmcall.rs index 992688e..8ef2dec 100644 --- a/hypervisor/src/intel/vmexit/vmcall.rs +++ b/hypervisor/src/intel/vmexit/vmcall.rs @@ -59,8 +59,10 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Guest Large Page PA: {:#x}", guest_large_page_pa.as_u64()); + let mut hook_manager = HookManager::get_hook_manager_mut(); + // Set the current hook to the EPT hook for handling MTF exit - let exit_type = if let Some(shadow_page_pa) = vm.hook_manager.memory_manager.get_shadow_page_as_ptr(guest_page_pa.as_u64()) { + let exit_type = if let Some(shadow_page_pa) = hook_manager.memory_manager.get_shadow_page_as_ptr(guest_page_pa.as_u64()) { trace!("Shadow Page PA: {:#x}", shadow_page_pa); trace!("Executing VMCALL hook on shadow page for EPT hook at PA: {:#x} with VA: {:#x}", guest_function_pa, vm.guest_registers.rip); @@ -69,8 +71,7 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { // crate::windows::log::log_nt_open_process_params(&vm.guest_registers); // crate::windows::log::log_mm_is_address_valid_params(&vm.guest_registers); - let pre_alloc_pt = vm - .hook_manager + let pre_alloc_pt = hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -79,8 +80,7 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { vm.primary_ept .swap_page(guest_page_pa.as_u64(), guest_page_pa.as_u64(), AccessType::READ_WRITE_EXECUTE, pre_alloc_pt)?; - let hook_info = 
vm - .hook_manager + let hook_info = hook_manager .memory_manager .get_hook_info_by_function_pa(guest_page_pa.as_u64(), guest_function_pa.as_u64()) .ok_or(HypervisorError::HookInfoNotFound)?; @@ -88,17 +88,16 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { debug!("Hook info: {:#x?}", hook_info); // Calculate the number of instructions in the function to set the MTF counter for restoring overwritten instructions by single-stepping. - // (NOTE: CHANGE HOOK SIZE IF YOU MOVE THIS INTO CPUID OR INT3) let instruction_count = unsafe { HookManager::calculate_instruction_count(guest_function_pa.as_u64(), HookManager::hook_size(hook_info.ept_hook_type)) as u64 }; - vm.hook_manager.mtf_counter = Some(instruction_count); + hook_manager.mtf_counter = Some(instruction_count); // Set the monitor trap flag and initialize counter to the number of overwritten instructions set_monitor_trap_flag(true); // Ensure all data mutations to vm are done before calling this. // This function will update the guest interrupt flag to prevent interrupts while single-stepping - update_guest_interrupt_flag(vm, false)?; + update_guest_interrupt_flag(vm, &mut hook_manager, false)?; Ok(ExitType::Continue) } else { diff --git a/hypervisor/src/windows/kernel.rs b/hypervisor/src/windows/kernel.rs deleted file mode 100644 index 8433818..0000000 --- a/hypervisor/src/windows/kernel.rs +++ /dev/null @@ -1,114 +0,0 @@ -use { - crate::{ - error::HypervisorError, - intel::{ - addresses::PhysicalAddress, - hooks::hook_manager::{EptHookType, HookManager}, - vm::Vm, - }, - windows::{ - nt::pe::{get_export_by_hash, get_image_base_address, get_size_of_image}, - ssdt::ssdt_hook::SsdtHook, - }, - }, - log::*, -}; - -/// Represents a hook into the Windows kernel, allowing redirection of functions and syscalls. -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub struct KernelHook { - /// The base virtual address of ntoskrnl.exe. - ntoskrnl_base_va: u64, - - /// The base physical address of ntoskrnl.exe. 
- ntoskrnl_base_pa: u64, - - /// The size of ntoskrnl.exe. - ntoskrnl_size: u64, -} - -impl KernelHook { - /// Creates a new instance of `KernelHook`. - /// - /// # Returns - /// - /// * `Ok(Self)` - The new instance of `KernelHook`. - pub fn new() -> Result { - trace!("Initializing kernel hook"); - Ok(Self { - ntoskrnl_base_va: 0, - ntoskrnl_base_pa: 0, - ntoskrnl_size: 0, - }) - } - - /// Sets the base address and size of the Windows kernel. - /// - /// # Arguments - /// - /// * `guest_va` - The virtual address of the guest. - /// - /// # Returns - /// - /// * `Ok(())` - The kernel base and size were set successfully. - pub fn set_kernel_base_and_size(&mut self, guest_va: u64) -> Result<(), HypervisorError> { - // Get the base address of ntoskrnl.exe. - self.ntoskrnl_base_va = unsafe { get_image_base_address(guest_va).ok_or(HypervisorError::FailedToGetImageBaseAddress)? }; - - // Get the physical address of ntoskrnl.exe using GUEST_CR3 and the virtual address. - self.ntoskrnl_base_pa = PhysicalAddress::pa_from_va(self.ntoskrnl_base_va); - - // Get the size of ntoskrnl.exe. - self.ntoskrnl_size = unsafe { get_size_of_image(self.ntoskrnl_base_pa as _).ok_or(HypervisorError::FailedToGetKernelSize)? } as u64; - - Ok(()) - } - - /// Manages an EPT hook for a kernel function, enabling or disabling it. - /// - /// # Arguments - /// - /// * `vm` - The virtual machine to install/remove the hook on. - /// * `function_hash` - The hash of the function to hook/unhook. - /// * `syscall_number` - The syscall number to use if `get_export_by_hash` fails. - /// * `ept_hook_type` - The type of EPT hook to use. - /// * `enable` - A boolean indicating whether to enable (true) or disable (false) the hook. - /// - /// # Returns - /// - /// * `Ok(())` - The hook was managed successfully. - /// * `Err(HypervisorError)` - If the hook management fails. 
- pub fn manage_kernel_ept_hook( - &mut self, - vm: &mut Vm, - function_hash: u32, - syscall_number: u16, - ept_hook_type: EptHookType, - enable: bool, - ) -> Result<(), HypervisorError> { - let action = if enable { "Enabling" } else { "Disabling" }; - debug!("{} EPT hook for function: {}", action, function_hash); - - let function_va = unsafe { - if let Some(va) = get_export_by_hash(self.ntoskrnl_base_pa as _, self.ntoskrnl_base_va as _, function_hash) { - va - } else { - let ssdt_function_address = - SsdtHook::find_ssdt_function_address(syscall_number as _, false, self.ntoskrnl_base_pa as _, self.ntoskrnl_size as _); - match ssdt_function_address { - Ok(ssdt_hook) => ssdt_hook.guest_function_va as *mut u8, - Err(_) => return Err(HypervisorError::FailedToGetExport), - } - } - }; - - if enable { - HookManager::ept_hook_function(vm, function_va as _, function_hash, ept_hook_type)?; - } else { - HookManager::ept_unhook_function(vm, function_va as _, ept_hook_type)?; - } - - Ok(()) - } -} diff --git a/hypervisor/src/windows/mod.rs b/hypervisor/src/windows/mod.rs index d88426d..27d968e 100644 --- a/hypervisor/src/windows/mod.rs +++ b/hypervisor/src/windows/mod.rs @@ -1,4 +1,3 @@ -pub mod kernel; pub mod log; pub mod nt; pub mod ssdt; diff --git a/uefi/Cargo.toml b/uefi/Cargo.toml index bc9107b..9c6ba2b 100644 --- a/uefi/Cargo.toml +++ b/uefi/Cargo.toml @@ -16,4 +16,5 @@ log = { version = "0.4.20", default-features = false } # https://crates.io/crate once_cell = "1.19.0" # https://crates.io/crates/once_cell spin = "0.9" # https://crates.io/crates/spin com_logger = "0.1.1" # https://crates.io/crates/com_logger +lazy_static = { version = "1.4.0", features = ["spin_no_std"] } # https://crates.io/crates/lazy_static hypervisor = { path = "../hypervisor" } \ No newline at end of file diff --git a/uefi/src/setup.rs b/uefi/src/setup.rs index 598086e..85a755c 100644 --- a/uefi/src/setup.rs +++ b/uefi/src/setup.rs @@ -3,18 +3,12 @@ //! physical to virtual addressing. 
This is useful for ensuring a stable memory layout in hypervisor development. use { - alloc::boxed::Box, - core::sync::atomic::Ordering, - hypervisor::{ - allocator::{box_zeroed, record_allocation}, - intel::{hooks::hook_manager::DUMMY_PAGE_ADDRESS, page::Page}, - }, + hypervisor::{allocator::record_allocation, intel::hooks::hook_manager::GLOBAL_HOOK_MANAGER}, log::debug, uefi::{prelude::BootServices, proto::loaded_image::LoadedImage}, }; -/// Sets up the hypervisor by recording the image base, creating a dummy page, -/// and nullifying the relocation table. +/// Sets up the hypervisor by recording the image base, nullifying the relocation table, and initializing the global hook manager. /// /// # Arguments /// @@ -26,9 +20,9 @@ use { pub fn setup(boot_services: &BootServices) -> uefi::Result<()> { let loaded_image = boot_services.open_protocol_exclusive::(boot_services.image_handle())?; record_image_base(&loaded_image); - create_dummy_page(0xFF); let image_base = loaded_image.info().0 as u64; zap_relocations(image_base); + lazy_static::initialize(&GLOBAL_HOOK_MANAGER); Ok(()) } @@ -47,21 +41,6 @@ pub fn record_image_base(loaded_image: &LoadedImage) { record_allocation(image_base as usize, image_size as usize); } -/// Creates a dummy page filled with a specific byte value. -/// -/// This function allocates a page of memory and fills it with a specified byte value. -/// The address of the dummy page is stored in a global variable for access by multiple cores/threads/processors. -/// -/// # Arguments -/// -/// * `fill_byte` - The byte value to fill the page with. -pub fn create_dummy_page(fill_byte: u8) { - let mut dummy_page = unsafe { box_zeroed::() }; - dummy_page.0.iter_mut().for_each(|byte| *byte = fill_byte); - let dummy_page_pa = Box::into_raw(dummy_page) as u64; - DUMMY_PAGE_ADDRESS.store(dummy_page_pa, Ordering::SeqCst); -} - /// Nullifies the relocation table of the loaded UEFI image to prevent relocation. 
/// /// This function modifies the loaded image's PE header to zero out the relocation table, From 9593c161d25be8df3cd29cf560ace5553cd2143c Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Sun, 23 Jun 2024 19:51:54 +1200 Subject: [PATCH 77/87] Stack and Heap Allocation Refactor (Seperation) --- hypervisor/Cargo.toml | 2 - hypervisor/src/allocator.rs | 68 +++-------------------- hypervisor/src/global_const.rs | 5 -- uefi/src/main.rs | 12 +++-- uefi/src/stack.rs | 98 ++++++++++++++++++++++++++++++++++ uefi/src/virtualize.rs | 27 +++++++--- 6 files changed, 134 insertions(+), 78 deletions(-) create mode 100644 uefi/src/stack.rs diff --git a/hypervisor/Cargo.toml b/hypervisor/Cargo.toml index 6d555c9..57b6cee 100644 --- a/hypervisor/Cargo.toml +++ b/hypervisor/Cargo.toml @@ -12,8 +12,6 @@ path = "src/lib.rs" [dependencies] x86 = "0.52.0" # https://crates.io/crates/x86 x86_64 = "0.15.0" # https://crates.io/crates/x86_64 -uefi = { version = "0.28.0", features = ["alloc"] } # https://crates.io/crates/uefi -#uefi-services = { version = "0.25.0", default-features = false } # https://crates.io/crates/uefi-services thiserror-no-std = "2.0.2" # https://crates.io/crates/thiserror-no-std bitfield = "0.15.0" # https://crates.io/crates/bitfield bit_field = "0.10.2" # https://crates.io/crates/bit_field diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 8808317..5064fc0 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -4,23 +4,26 @@ //! debugging information. 
use { - crate::global_const::{HEAP_SIZE, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES}, + crate::global_const::HEAP_SIZE, alloc::{boxed::Box, vec::Vec}, core::{ alloc::{GlobalAlloc, Layout}, ptr, - sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, + sync::atomic::{AtomicUsize, Ordering}, }, log::debug, spin::Mutex, - uefi::table::{boot::AllocateType, Boot, SystemTable}, - x86::bits64::paging::BASE_PAGE_SIZE, }; /// Global allocator instance with a heap size of `HEAP_SIZE`. #[global_allocator] pub static mut HEAP: ListHeap = ListHeap::new(); +/// Initializes the linked list heap. +pub unsafe fn heap_init() { + HEAP.reset(); +} + /// A heap allocator based on a linked list of free chunks. /// /// This struct manages a heap of a fixed size using a linked list @@ -330,63 +333,6 @@ pub unsafe fn box_zeroed() -> Box { unsafe { Box::::new_zeroed().assume_init() } } -/// Reference to the system table, used to call the boot services pool memory -/// allocation functions. -static SYSTEM_TABLE: AtomicPtr> = AtomicPtr::new(ptr::null_mut()); - -/// Initializes the system table and resets the global heap. -/// -/// This function must be called before any memory allocation operations are performed. It initializes -/// the system table reference and resets the global heap to its default state. -/// -/// # Safety -/// -/// This function is unsafe because it must be called exactly once and must be called -/// before any allocations are made. -/// -/// # Important -/// -/// This function must be called to ensure that the global allocator is properly initialized and reset. -/// -/// # Arguments -/// -/// * `system_table` - A reference to the UEFI system table. -pub unsafe fn initialize_system_table_and_heap(system_table: &SystemTable) { - SYSTEM_TABLE.store(system_table as *const _ as *mut _, Ordering::Release); - HEAP.reset(); -} - -/// Allocates a block of memory pages using UEFI's allocate_pages function. 
-/// -/// This function allocates memory pages that are not part of the global allocator. -/// The allocated memory is of type `RUNTIME_SERVICES_DATA` and is allocated anywhere in memory. -/// -/// # Returns -/// -/// A pointer to the allocated memory block. -/// -/// # Panics -/// -/// This function will panic if memory allocation fails. -pub fn allocate_host_stack() -> *mut u8 { - let _guard = ALLOCATOR_MUTEX.lock(); // Ensure thread safety - - // Get the system table and boot services - let system_table = SYSTEM_TABLE.load(Ordering::Acquire); - let boot_services = unsafe { &(*system_table).boot_services() }; - - // Allocate the pages using UEFI's allocate_pages function - let allocated_pages = boot_services - .allocate_pages(AllocateType::AnyPages, STACK_MEMORY_TYPE, STACK_NUMBER_OF_PAGES) - .expect("Failed to allocate UEFI pages"); - - // Record the allocation - record_allocation(allocated_pages as usize, STACK_NUMBER_OF_PAGES * BASE_PAGE_SIZE); // Assuming 4KB pages - - // Return the pointer to the allocated memory block - allocated_pages as *mut u8 -} - /// Structure to store allocated memory ranges. /// /// This struct is used to keep track of memory allocations by storing the diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index fe4f411..82cfbe7 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -1,10 +1,5 @@ -use uefi::table::boot::MemoryType; - /// The size of the heap in bytes. pub const HEAP_SIZE: usize = 0x180000; /// The size of the stack in bytes. 
pub const STACK_NUMBER_OF_PAGES: usize = 0x300; - -/// The memory type for the stack allocated pages -pub const STACK_MEMORY_TYPE: MemoryType = MemoryType::RUNTIME_SERVICES_DATA; diff --git a/uefi/src/main.rs b/uefi/src/main.rs index abbdec1..930a082 100644 --- a/uefi/src/main.rs +++ b/uefi/src/main.rs @@ -10,9 +10,9 @@ extern crate alloc; use { - crate::{processor::start_hypervisor_on_all_processors, setup::setup}, + crate::{processor::start_hypervisor_on_all_processors, setup::setup, stack::init}, hypervisor::{ - allocator::initialize_system_table_and_heap, + allocator::heap_init, logger::{self, SerialPort}, }, log::*, @@ -21,6 +21,7 @@ use { pub mod processor; pub mod setup; +pub mod stack; pub mod virtualize; /// Custom panic handler for the UEFI application. @@ -56,9 +57,12 @@ fn panic_handler(info: &core::panic::PanicInfo) -> ! { /// The status of the application execution. Returns `Status::SUCCESS` on successful execution, /// or `Status::ABORTED` if the hypervisor fails to install. #[entry] -fn main(_image_handle: Handle, system_table: SystemTable) -> Status { +fn main(_image_handle: Handle, mut system_table: SystemTable) -> Status { unsafe { - initialize_system_table_and_heap(&system_table); + // Initialize the stack allocator. + init(&mut system_table); + // Initialize the global heap allocator. + heap_init(); } // Initialize logging with the COM2 port and set the level filter to Debug. diff --git a/uefi/src/stack.rs b/uefi/src/stack.rs new file mode 100644 index 0000000..d14470c --- /dev/null +++ b/uefi/src/stack.rs @@ -0,0 +1,98 @@ +use { + core::{ + alloc::Layout, + ffi::c_void, + ptr, + sync::atomic::{AtomicPtr, AtomicU32, Ordering}, + }, + hypervisor::allocator::record_allocation, + uefi::{ + prelude::{Boot, BootServices, SystemTable}, + proto::loaded_image::LoadedImage, + table::boot::MemoryType, + }, +}; + +/// Reference to the system table, used to call the boot services pool memory +/// allocation functions. 
+/// +/// The pointer is only safe to dereference if UEFI boot services have not been +/// exited by the host application yet. +static SYSTEM_TABLE: AtomicPtr = AtomicPtr::new(ptr::null_mut()); + +/// The memory type used for pool memory allocations. +static MEMORY_TYPE: AtomicU32 = AtomicU32::new(MemoryType::LOADER_DATA.0); + +/// Initializes the allocator. +/// +/// # Safety +/// +/// This function is unsafe because you _must_ make sure that exit_boot_services +/// will be called when UEFI boot services will be exited. +pub unsafe fn init(system_table: &mut SystemTable) { + SYSTEM_TABLE.store(system_table.as_ptr().cast_mut(), Ordering::Release); + + let boot_services = system_table.boot_services(); + if let Ok(loaded_image) = boot_services.open_protocol_exclusive::(boot_services.image_handle()) { + MEMORY_TYPE.store(loaded_image.data_type().0, Ordering::Release); + } +} + +/// Allocate memory using [`BootServices::allocate_pool`]. The allocation is +/// of type [`MemoryType::LOADER_DATA`] for UEFI applications, [`MemoryType::BOOT_SERVICES_DATA`] +/// for UEFI boot drivers and [`MemoryType::RUNTIME_SERVICES_DATA`] for UEFI runtime drivers. +pub unsafe fn allocate_host_stack(layout: Layout) -> *mut u8 { + let size = layout.size(); + let align = layout.align(); + + // Get the system table and boot services + let memory_type = MemoryType(MEMORY_TYPE.load(Ordering::Acquire)); + let boot_services = &*boot_services(); + + let stack = if align > 8 { + // The requested alignment is greater than 8, but `allocate_pool` is + // only guaranteed to provide eight-byte alignment. Allocate extra + // space so that we can return an appropriately-aligned pointer + // within the allocation. + let full_alloc_ptr = if let Ok(ptr) = boot_services.allocate_pool(memory_type, size + align) { + ptr + } else { + return ptr::null_mut(); + }; + + // Calculate the offset needed to get an aligned pointer within the + // full allocation. 
If that offset is zero, increase it to `align` + // so that we still have space to store the extra pointer described + // below. + let mut offset = full_alloc_ptr.align_offset(align); + if offset == 0 { + offset = align; + } + + // Before returning the aligned allocation, store a pointer to the + // full unaligned allocation in the bytes just before the aligned + // allocation. We know we have at least eight bytes there due to + // adding `align` to the memory allocation size. We also know the + // write is appropriately aligned for a `*mut u8` pointer because + // `align_ptr` is aligned, and alignments are always powers of two + // (as enforced by the `Layout` type). + let aligned_ptr = full_alloc_ptr.add(offset); + aligned_ptr.cast::<*mut u8>().sub(1).write(full_alloc_ptr); + aligned_ptr + } else { + // The requested alignment is less than or equal to eight, and + // `allocate_pool` always provides eight-byte alignment, so we can + // use `allocate_pool` directly. + boot_services.allocate_pool(memory_type, size).map(|ptr| ptr).unwrap_or(ptr::null_mut()) + }; + record_allocation(stack as usize, layout.size()); + + stack +} + +/// Access the boot services +fn boot_services() -> *const BootServices { + let ptr = SYSTEM_TABLE.load(Ordering::Acquire); + let system_table = unsafe { SystemTable::from_ptr(ptr) }.expect("The system table handle is not available"); + system_table.boot_services() +} diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs index 75192d0..ca55b9e 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -4,8 +4,13 @@ //! 
Credits to Satoshi Tanda: https://github.com/tandasat/Hello-VT-rp/blob/main/hypervisor/src/switch_stack.rs use { - core::arch::global_asm, - hypervisor::{allocator::allocate_host_stack, global_const::STACK_NUMBER_OF_PAGES, intel::capture::GuestRegisters, vmm::start_hypervisor}, + crate::stack::allocate_host_stack, + core::{alloc::Layout, arch::global_asm, intrinsics::copy_nonoverlapping}, + hypervisor::{ + global_const::STACK_NUMBER_OF_PAGES, + intel::{capture::GuestRegisters, page::Page}, + vmm::start_hypervisor, + }, log::debug, }; @@ -17,11 +22,21 @@ use { pub fn virtualize_system(guest_registers: &GuestRegisters) -> ! { debug!("Allocating stack space for host"); - let host_stack = allocate_host_stack() as usize; - let range = host_stack..(host_stack + STACK_NUMBER_OF_PAGES * 4096); - debug!("Host stack allocated at {:#x?}", range); + let layout = Layout::array::(STACK_NUMBER_OF_PAGES).unwrap(); + let stack = unsafe { allocate_host_stack(layout) }; + let size = layout.size(); - unsafe { switch_stack(guest_registers, start_hypervisor as usize, host_stack as _) }; + debug!("Zeroing stack space for host"); + unsafe { copy_nonoverlapping(0 as _, stack, size) } + + if stack == core::ptr::null_mut() { + panic!("Failed to allocate stack"); + } + + let stack_base = stack as u64 + layout.size() as u64 - 0x10; + log::trace!("Stack range: {:#x?}", stack as u64..stack_base); + + unsafe { switch_stack(guest_registers, start_hypervisor as usize, stack_base as _) }; } extern "efiapi" { From 0c551117d3a823c746f6a88eb249337265a8d883 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 01:01:09 +1200 Subject: [PATCH 78/87] Create a MaybeUninit zeroed VM. - EPT Bug Needs Fixing! 
--- hypervisor/src/intel/bitmap.rs | 18 +++---- hypervisor/src/intel/descriptor.rs | 29 +++++------- hypervisor/src/intel/ept.rs | 16 +++---- hypervisor/src/intel/paging.rs | 14 ++---- hypervisor/src/intel/vm.rs | 76 +++++++++++++++++------------- hypervisor/src/intel/vmcs.rs | 16 ++----- hypervisor/src/intel/vmxon.rs | 14 ++---- hypervisor/src/vmm.rs | 13 +++-- 8 files changed, 90 insertions(+), 106 deletions(-) diff --git a/hypervisor/src/intel/bitmap.rs b/hypervisor/src/intel/bitmap.rs index 57040b7..2d5098b 100644 --- a/hypervisor/src/intel/bitmap.rs +++ b/hypervisor/src/intel/bitmap.rs @@ -47,18 +47,12 @@ pub struct MsrBitmap { } impl MsrBitmap { - /// Creates a new MSR bitmap with all bits cleared. - /// - /// # Returns - /// - /// * A `MsrBitmap` instance with all bits initialized to zero. - pub fn new() -> Self { - Self { - read_low_msrs: [0; 0x400], - read_high_msrs: [0; 0x400], - write_low_msrs: [0; 0x400], - write_high_msrs: [0; 0x400], - } + /// Initializes the MSR bitmap by setting all bits to 0. + pub fn init(&mut self) { + self.read_low_msrs.iter_mut().for_each(|byte| *byte = 0); + self.read_high_msrs.iter_mut().for_each(|byte| *byte = 0); + self.write_low_msrs.iter_mut().for_each(|byte| *byte = 0); + self.write_high_msrs.iter_mut().for_each(|byte| *byte = 0); } /// Modifies the interception for a specific MSR based on the specified operation and access type. diff --git a/hypervisor/src/intel/descriptor.rs b/hypervisor/src/intel/descriptor.rs index 4dadb43..4b4e889 100644 --- a/hypervisor/src/intel/descriptor.rs +++ b/hypervisor/src/intel/descriptor.rs @@ -23,7 +23,7 @@ use { /// for both host and guest VMX operations. pub struct Descriptors { /// Vector holding the GDT entries. - gdt: Vec, + pub gdt: Vec, /// Descriptor table pointer to the GDT. 
pub gdtr: DescriptorTablePointer, @@ -38,17 +38,6 @@ pub struct Descriptors { pub tss: TaskStateSegment, } -impl Default for Descriptors { - fn default() -> Self { - Self { - gdt: Vec::new(), - gdtr: DescriptorTablePointer::::default(), - cs: SegmentSelector::from_raw(0), - tr: SegmentSelector::from_raw(0), - tss: TaskStateSegment::default(), - } - } -} impl Descriptors { /// Creates a new GDT based on the current one, including TSS. /// @@ -64,10 +53,12 @@ impl Descriptors { let current_gdtr = sgdt(); let current_gdt = unsafe { core::slice::from_raw_parts(current_gdtr.base.cast::(), usize::from(current_gdtr.limit + 1) / 8) }; - // Copy the current GDT. - let mut descriptors = Self { + let mut descriptors = Descriptors { gdt: current_gdt.to_vec(), - ..Default::default() + gdtr: DescriptorTablePointer::::default(), + cs: SegmentSelector::from_raw(0), + tr: SegmentSelector::from_raw(0), + tss: TaskStateSegment::default(), }; // Append the TSS descriptor. Push extra 0 as it is 16 bytes. @@ -95,7 +86,13 @@ impl Descriptors { pub fn new_for_host() -> Self { log::debug!("Creating a new GDT with TSS for host"); - let mut descriptors = Self::default(); + let mut descriptors = Descriptors { + gdt: Vec::new(), + gdtr: DescriptorTablePointer::::default(), + cs: SegmentSelector::from_raw(0), + tr: SegmentSelector::from_raw(0), + tss: TaskStateSegment::default(), + }; descriptors.gdt.push(0); descriptors.gdt.push(Self::code_segment_descriptor().as_u64()); diff --git a/hypervisor/src/intel/ept.rs b/hypervisor/src/intel/ept.rs index 4359660..b4de333 100644 --- a/hypervisor/src/intel/ept.rs +++ b/hypervisor/src/intel/ept.rs @@ -39,16 +39,12 @@ pub struct Ept { } impl Ept { - /// Constructs a new `Ept` instance with default-initialized entries. - /// - /// Initializes all entries in PML4, PDPT, PD, and PT tables to zero, preparing the EPT for use. 
- pub fn new() -> Self { - Self { - pml4: Pml4(Table { entries: [Entry(0); 512] }), - pdpt: Pdpt(Table { entries: [Entry(0); 512] }), - pd: [Pd(Table { entries: [Entry(0); 512] }); 512], - pt: Pt(Table { entries: [Entry(0); 512] }), - } + /// Initializes the Extended Page Table (EPT) structure. + pub fn init(&mut self) { + self.pml4 = Pml4(Table { entries: [Entry(0); 512] }); + self.pdpt = Pdpt(Table { entries: [Entry(0); 512] }); + self.pd = [Pd(Table { entries: [Entry(0); 512] }); 512]; + self.pt = Pt(Table { entries: [Entry(0); 512] }); } /// Builds an identity-mapped Extended Page Table (EPT) structure with considerations for Memory Type Range Registers (MTRR). diff --git a/hypervisor/src/intel/paging.rs b/hypervisor/src/intel/paging.rs index 4052924..74bcd9b 100644 --- a/hypervisor/src/intel/paging.rs +++ b/hypervisor/src/intel/paging.rs @@ -40,15 +40,11 @@ pub struct PageTables { } impl PageTables { - /// Constructs a new `PageTables` instance with default-initialized entries. - /// - /// Initializes all entries in PML4, PDPT, and PD tables to zero, preparing the Page Tables for use. - pub fn new() -> Self { - Self { - pml4: Pml4(Table { entries: [Entry(0); 512] }), - pdpt: Pdpt(Table { entries: [Entry(0); 512] }), - pd: [Pd(Table { entries: [Entry(0); 512] }); 512], - } + /// Initializes the Page Tables structure with empty tables. + pub fn init(&mut self) { + self.pml4 = Pml4(Table { entries: [Entry(0); 512] }); + self.pdpt = Pdpt(Table { entries: [Entry(0); 512] }); + self.pd = [Pd(Table { entries: [Entry(0); 512] }); 512]; } /// Builds a basic identity map for the page tables. 
diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index f782ef5..8c2a657 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -21,6 +21,7 @@ use { vmxon::Vmxon, }, }, + core::mem::MaybeUninit, log::*, x86::{bits64::rflags::RFlags, msr, vmx::vmcs}, }; @@ -37,24 +38,24 @@ pub struct Vm { /// The VMCS (Virtual Machine Control Structure) for the VM. pub vmcs_region: Vmcs, - /// Descriptor tables for the guest state. - pub guest_descriptor: Descriptors, - /// Descriptor tables for the host state. pub host_descriptor: Descriptors, + /// Descriptor tables for the guest state. + pub guest_descriptor: Descriptors, + /// Paging tables for the host. pub host_paging: PageTables, - /// A bitmap for handling MSRs. - pub msr_bitmap: MsrBitmap, - /// The primary EPT (Extended Page Tables) for the VM. pub primary_ept: Ept, /// The primary EPTP (Extended Page Tables Pointer) for the VM. pub primary_eptp: u64, + /// A bitmap for handling MSRs. + pub msr_bitmap: MsrBitmap, + /// State of guest general-purpose registers. pub guest_registers: GuestRegisters, @@ -63,6 +64,11 @@ pub struct Vm { } impl Vm { + /// Creates a new zeroed VM instance. + pub fn zeroed() -> MaybeUninit { + MaybeUninit::zeroed() + } + /// Initializes a new VM instance with specified guest registers. /// /// Sets up the necessary environment for the VM, including VMCS initialization, host and guest @@ -76,50 +82,52 @@ impl Vm { /// /// Returns `Ok(Self)` with a newly created `Vm` instance, or an `Err(HypervisorError)` if /// any part of the setup fails. 
- pub fn new(guest_registers: &GuestRegisters) -> Result { + pub fn init(&mut self, guest_registers: &GuestRegisters) -> Result<(), HypervisorError> { trace!("Creating VM"); - trace!("Allocating VMXON region"); - let vmxon_region = Vmxon::new(); + trace!("Initializing VMXON region"); + self.vmxon_region.init(); - trace!("Allocating VMCS region"); - let vmcs_region = Vmcs::new(); + trace!("Initializing VMCS region"); + self.vmcs_region.init(); - trace!("Allocating Memory for Host Paging"); - let mut host_paging = PageTables::new(); + trace!("Initializing Host Descriptor Tables"); + self.host_descriptor = Descriptors::new_for_host(); - trace!("Building Identity Paging for Host"); - host_paging.build_identity(); + trace!("Initializing Guest Descriptor Tables"); + self.guest_descriptor = Descriptors::new_from_current(); + + trace!("Initializing Host Paging Tables"); + self.host_paging.init(); - trace!("Allocating MSR Bitmap"); - let mut msr_bitmap = MsrBitmap::new(); + trace!("Building Identity Paging for Host"); + self.host_paging.build_identity(); - trace!("Allocating Primary EPT"); - let mut primary_ept = Ept::new(); + trace!("Initializing Primary EPT"); + self.primary_ept.init(); trace!("Identity Mapping Primary EPT"); - primary_ept.build_identity()?; + self.primary_ept.build_identity()?; trace!("Creating primary EPTP with WB and 4-level walk"); - let primary_eptp = primary_ept.create_eptp_with_wb_and_4lvl_walk()?; + self.primary_eptp = self.primary_ept.create_eptp_with_wb_and_4lvl_walk()?; + + trace!("Initializing MSR Bitmap"); + self.msr_bitmap.init(); trace!("Modifying MSR interception for LSTAR MSR write access"); - msr_bitmap.modify_msr_interception(msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook); + self.msr_bitmap + .modify_msr_interception(msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook); + + trace!("Initializing Guest Registers"); + self.guest_registers = guest_registers.clone(); + + trace!("Initializing Launch State"); + 
self.has_launched = false; trace!("VM created"); - Ok(Self { - vmxon_region, - vmcs_region, - host_paging, - host_descriptor: Descriptors::new_for_host(), - guest_descriptor: Descriptors::new_from_current(), - msr_bitmap, - primary_ept, - primary_eptp, - guest_registers: guest_registers.clone(), - has_launched: false, - }) + Ok(()) } /// Activates the VMXON region to enable VMX operation. diff --git a/hypervisor/src/intel/vmcs.rs b/hypervisor/src/intel/vmcs.rs index bfc4838..44194fc 100644 --- a/hypervisor/src/intel/vmcs.rs +++ b/hypervisor/src/intel/vmcs.rs @@ -44,18 +44,10 @@ pub struct Vmcs { } impl Vmcs { - /// Constructs a default `Vmcs` instance with the necessary revision ID. - /// - /// Initializes the VMCS with the appropriate revision identifier obtained from the IA32_VMX_BASIC MSR, - /// sets the abort indicator to 0, and fills the reserved area with zeros, preparing the VMCS for use. - pub fn new() -> Self { - let mut revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; - revision_id.set_bit(31, false); - Self { - revision_id, - abort_indicator: 0, - reserved: [0; BASE_PAGE_SIZE - 8], - } + /// Initializes the VMCS region. + pub fn init(&mut self) { + self.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; + self.revision_id.set_bit(31, false); } /// Initialize the guest state for the currently loaded VMCS. diff --git a/hypervisor/src/intel/vmxon.rs b/hypervisor/src/intel/vmxon.rs index f80895b..6074600 100644 --- a/hypervisor/src/intel/vmxon.rs +++ b/hypervisor/src/intel/vmxon.rs @@ -27,16 +27,10 @@ pub struct Vmxon { } impl Vmxon { - /// Constructs a default `Vmxon` instance. - /// - /// Sets the revision ID to the value read from the IA32_VMX_BASIC MSR and initializes the data array to zeros, preparing the VMXON region for use. - pub fn new() -> Self { - let mut revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; - revision_id.set_bit(31, false); - Self { - revision_id, - data: [0; BASE_PAGE_SIZE - 4], - } + /// Initializes the VMXON region. 
+ pub fn init(&mut self) { + self.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32; + self.revision_id.set_bit(31, false); } /// Enables VMX operation by setting the VMX-enable bit in CR4. diff --git a/hypervisor/src/vmm.rs b/hypervisor/src/vmm.rs index ebfe0af..08c0076 100644 --- a/hypervisor/src/vmm.rs +++ b/hypervisor/src/vmm.rs @@ -60,9 +60,14 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! { Err(e) => panic!("CPU is not supported: {:?}", e), }; - let mut vm = match Vm::new(&guest_registers) { - Ok(vm) => vm, - Err(e) => panic!("Failed to create VM: {:?}", e), + let mut vm_uninit = Vm::zeroed(); + + let mut vm = unsafe { + let vm_ptr = &mut *vm_uninit.as_mut_ptr(); + match vm_ptr.init(&guest_registers) { + Ok(_) => vm_uninit.assume_init(), + Err(e) => panic!("Failed to create VM: {:?}", e), + } }; match vm.activate_vmxon() { @@ -75,6 +80,8 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! { Err(e) => panic!("Failed to activate VMCS: {:?}", e), } + trace!("VMCS Dump: {:#?}", vm.vmcs_region); + /* match HookManager::hide_hypervisor_memory(&mut vm, AccessType::READ_WRITE_EXECUTE) { Ok(_) => debug!("Hypervisor memory hidden"), From e3058e6c6ef8ff5e87982b70ecccfa6cbbcaff35 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 01:17:07 +1200 Subject: [PATCH 79/87] EPT Bug Fixed! --- hypervisor/src/vmm.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/hypervisor/src/vmm.rs b/hypervisor/src/vmm.rs index 08c0076..e7f6da0 100644 --- a/hypervisor/src/vmm.rs +++ b/hypervisor/src/vmm.rs @@ -60,15 +60,11 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! 
{ Err(e) => panic!("CPU is not supported: {:?}", e), }; - let mut vm_uninit = Vm::zeroed(); - - let mut vm = unsafe { - let vm_ptr = &mut *vm_uninit.as_mut_ptr(); - match vm_ptr.init(&guest_registers) { - Ok(_) => vm_uninit.assume_init(), - Err(e) => panic!("Failed to create VM: {:?}", e), - } - }; + let mut vm = unsafe { Vm::zeroed().assume_init() }; + match vm.init(guest_registers) { + Ok(_) => debug!("VM initialized"), + Err(e) => panic!("Failed to initialize VM: {:?}", e), + } match vm.activate_vmxon() { Ok(_) => debug!("VMX enabled"), From 5df14878302ae132272d6f829415a042972058db Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 01:23:35 +1200 Subject: [PATCH 80/87] Update vmm.rs --- hypervisor/src/vmm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypervisor/src/vmm.rs b/hypervisor/src/vmm.rs index e7f6da0..21e74ea 100644 --- a/hypervisor/src/vmm.rs +++ b/hypervisor/src/vmm.rs @@ -76,7 +76,7 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! 
{ Err(e) => panic!("Failed to activate VMCS: {:?}", e), } - trace!("VMCS Dump: {:#?}", vm.vmcs_region); + trace!("VMCS Dump: {:#x?}", vm.vmcs_region); /* match HookManager::hide_hypervisor_memory(&mut vm, AccessType::READ_WRITE_EXECUTE) { From 7be1cbb46200ba85472de23fae766231a5cebfdc Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 12:10:24 +1200 Subject: [PATCH 81/87] Reverted "HookManager shared as global with multi-cores" Reverted Commit: 7bfe795df7d7bc9ded6bf455eeb74860375b5672 --- hypervisor/src/intel/hooks/hook_manager.rs | 205 ++++----------------- hypervisor/src/intel/vm.rs | 7 + hypervisor/src/intel/vmexit/commands.rs | 42 ++--- hypervisor/src/intel/vmexit/cpuid.rs | 81 ++++---- hypervisor/src/intel/vmexit/ept.rs | 25 ++- hypervisor/src/intel/vmexit/msr.rs | 12 +- hypervisor/src/intel/vmexit/mtf.rs | 22 +-- hypervisor/src/intel/vmexit/vmcall.rs | 15 +- hypervisor/src/windows/kernel.rs | 114 ++++++++++++ hypervisor/src/windows/mod.rs | 1 + uefi/Cargo.toml | 1 - uefi/src/setup.rs | 27 ++- 12 files changed, 284 insertions(+), 268 deletions(-) create mode 100644 hypervisor/src/windows/kernel.rs diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index a4de50d..c924929 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -1,6 +1,6 @@ use { crate::{ - allocator::{box_zeroed, print_tracked_allocations, ALLOCATED_MEMORY}, + allocator::{print_tracked_allocations, ALLOCATED_MEMORY}, error::HypervisorError, intel::{ addresses::PhysicalAddress, @@ -11,26 +11,21 @@ use { }, invept::invept_all_contexts, invvpid::invvpid_all_contexts, - page::Page, vm::Vm, }, - windows::{ - nt::pe::{get_export_by_hash, get_image_base_address, get_size_of_image}, - ssdt::ssdt_hook::SsdtHook, - }, + windows::kernel::KernelHook, + }, + core::{ + intrinsics::copy_nonoverlapping, + sync::atomic::{AtomicU64, Ordering}, }, - 
alloc::{boxed::Box, sync::Arc}, - core::intrinsics::copy_nonoverlapping, - lazy_static::lazy_static, log::*, - spin::{Mutex, MutexGuard}, x86::bits64::paging::{PAddr, BASE_PAGE_SIZE}, }; -lazy_static! { - /// Global instance of HookManager wrapped in a Mutex for thread-safe access. - pub static ref GLOBAL_HOOK_MANAGER: Arc> = Arc::new(Mutex::new(HookManager::new().expect("Failed to create HookManager instance"))); -} +/// Global variable to store the address of the created dummy page. +/// This variable can be accessed by multiple cores/threads/processors. +pub static DUMMY_PAGE_ADDRESS: AtomicU64 = AtomicU64::new(0); /// Enum representing different types of hooks that can be applied. #[derive(Debug, Clone, Copy)] @@ -51,14 +46,8 @@ pub struct HookManager { /// The memory manager instance for the pre-allocated shadow pages and page tables. pub memory_manager: MemoryManager, - /// The base address of ntoskrnl.exe. - pub ntoskrnl_base_va: u64, - - /// The physical address of ntoskrnl.exe. - pub ntoskrnl_base_pa: u64, - - /// The size of ntoskrnl.exe. - pub ntoskrnl_size: u64, + /// The hook instance for the Windows kernel, storing the VA and PA of ntoskrnl.exe. This is retrieved from the first LSTAR_MSR write operation, intercepted by the hypervisor. + pub kernel_hook: Option, /// A flag indicating whether the CPUID cache information has been called. This will be used to perform hooks at boot time when SSDT has been initialized. /// KiSetCacheInformation -> KiSetCacheInformationIntel -> KiSetStandardizedCacheInformation -> __cpuid(4, 0) @@ -70,57 +59,32 @@ pub struct HookManager { /// The number of times the MTF (Monitor Trap Flag) should be triggered before disabling it for restoring overwritten instructions. pub mtf_counter: Option, - - pub dummy_page: Box, } impl HookManager { /// Creates a new instance of `HookManager`. /// + /// # Arguments + /// + /// * `primary_ept_pre_alloc_pts` - A mutable reference to a vector of pre-allocated page tables. 
+ /// /// # Returns /// A result containing a boxed `HookManager` instance or an error of type `HypervisorError`. pub fn new() -> Result { trace!("Initializing hook manager"); let memory_manager = MemoryManager::new(); - let dummy_page = HookManager::create_dummy_page(0xff); + let kernel_hook = Some(KernelHook::new()?); Ok(Self { memory_manager, has_cpuid_cache_info_been_called: false, - ntoskrnl_base_va: 0, - ntoskrnl_base_pa: 0, - ntoskrnl_size: 0, + kernel_hook, old_rflags: None, mtf_counter: None, - dummy_page, }) } - /// Returns a reference to the global HookManager instance. - pub fn get_hook_manager_ref() -> Arc> { - Arc::clone(&GLOBAL_HOOK_MANAGER) - } - - /// Locks and returns a mutable reference to the global HookManager instance. - pub fn get_hook_manager_mut() -> MutexGuard<'static, HookManager> { - GLOBAL_HOOK_MANAGER.lock() - } - - /// Creates a dummy page filled with a specific byte value. - /// - /// This function allocates a page of memory and fills it with a specified byte value. - /// The address of the dummy page is stored in a global variable for access by multiple cores/threads/processors. - /// - /// # Arguments - /// - /// * `fill_byte` - The byte value to fill the page with. - pub fn create_dummy_page(fill_byte: u8) -> Box { - let mut dummy_page = unsafe { box_zeroed::() }; - dummy_page.0.iter_mut().for_each(|byte| *byte = fill_byte); - dummy_page - } - /// Hides the hypervisor memory from the guest by installing EPT hooks on all allocated memory regions. /// /// This function iterates through the recorded memory allocations and calls `ept_hide_hypervisor_memory` @@ -135,7 +99,7 @@ impl HookManager { /// # Returns /// /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise. 
- pub fn hide_hypervisor_memory(hook_manager: &mut HookManager, vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { + pub fn hide_hypervisor_memory(vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { // Print the tracked memory allocations for debugging purposes. print_tracked_allocations(); @@ -146,12 +110,7 @@ impl HookManager { for range in allocated_memory.iter() { for offset in (0..range.size).step_by(BASE_PAGE_SIZE) { let guest_page_pa = range.start + offset; - HookManager::ept_hide_hypervisor_memory( - hook_manager, - vm, - PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), - page_permissions, - )?; + HookManager::ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?; } } @@ -165,32 +124,28 @@ impl HookManager { /// # Arguments /// /// * `vm` - The virtual machine instance of the hypervisor. - /// * `guest_page_pa` - The physical address of the guest page. /// * `page_permissions` - The desired permissions for the hooked page. /// /// # Returns /// /// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise. 
- fn ept_hide_hypervisor_memory( - hook_manager: &mut HookManager, - vm: &mut Vm, - guest_page_pa: u64, - page_permissions: AccessType, - ) -> Result<(), HypervisorError> { + fn ept_hide_hypervisor_memory(vm: &mut Vm, guest_page_pa: u64, page_permissions: AccessType) -> Result<(), HypervisorError> { let guest_page_pa = PAddr::from(guest_page_pa).align_down_to_base_page(); trace!("Guest page PA: {:#x}", guest_page_pa.as_u64()); let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); - let dummy_page_pa = hook_manager.dummy_page.0.as_mut_ptr() as u64; + let dummy_page_pa = DUMMY_PAGE_ADDRESS.load(Ordering::SeqCst); + trace!("Dummy page PA: {:#x}", dummy_page_pa); trace!("Mapping large page"); // Map the large page to the pre-allocated page table, if it hasn't been mapped already. - hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; + vm.hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; - let pre_alloc_pt = hook_manager + let pre_alloc_pt = vm + .hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -244,13 +199,7 @@ impl HookManager { /// # Returns /// /// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise. - pub fn ept_hook_function( - hook_manager: &mut HookManager, - vm: &mut Vm, - guest_function_va: u64, - function_hash: u32, - ept_hook_type: EptHookType, - ) -> Result<(), HypervisorError> { + pub fn ept_hook_function(vm: &mut Vm, guest_function_va: u64, function_hash: u32, ept_hook_type: EptHookType) -> Result<(), HypervisorError> { debug!("Creating EPT hook for function at VA: {:#x}", guest_function_va); let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va)); @@ -265,13 +214,14 @@ impl HookManager { // 1. 
Map the large page to the pre-allocated page table, if it hasn't been mapped already. // We must map the large page to the pre-allocated page table before accessing it. debug!("Mapping large page"); - hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; + vm.hook_manager.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?; // 2. Check if the large page has already been split. If not, split it into 4KB pages. debug!("Checking if large page has already been split"); if vm.primary_ept.is_large_page(guest_page_pa.as_u64()) { // We must map the large page to the pre-allocated page table before accessing it. - let pre_alloc_pt = hook_manager + let pre_alloc_pt = vm + .hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -282,10 +232,10 @@ impl HookManager { // 3. Check if the guest page is already processed. If not, map the guest page to the shadow page. // Ensure the memory manager maintains a set of processed guest pages to track this mapping. - if !hook_manager.memory_manager.is_guest_page_processed(guest_page_pa.as_u64()) { + if !vm.hook_manager.memory_manager.is_guest_page_processed(guest_page_pa.as_u64()) { // We must map the guest page to the shadow page before accessing it. debug!("Mapping guest page and shadow page"); - hook_manager.memory_manager.map_guest_to_shadow_page( + vm.hook_manager.memory_manager.map_guest_to_shadow_page( guest_page_pa.as_u64(), guest_function_va, guest_function_pa.as_u64(), @@ -295,7 +245,7 @@ impl HookManager { // We must map the guest page to the shadow page before accessing it. let shadow_page_pa = PAddr::from( - hook_manager + vm.hook_manager .memory_manager .get_shadow_page_as_ptr(guest_page_pa.as_u64()) .ok_or(HypervisorError::ShadowPageNotFound)?, @@ -303,13 +253,12 @@ impl HookManager { // 4. 
Copy the guest page to the shadow page if it hasn't been copied already, ensuring the shadow page contains the original function code. debug!("Copying guest page to shadow page: {:#x}", guest_page_pa.as_u64()); - HookManager::unsafe_copy_guest_to_shadow(guest_page_pa, shadow_page_pa); + Self::unsafe_copy_guest_to_shadow(guest_page_pa, shadow_page_pa); // 5. Install the inline hook at the shadow function address if the hook type is `Function`. match ept_hook_type { EptHookType::Function(inline_hook_type) => { - let shadow_function_pa = - PAddr::from(HookManager::calculate_function_offset_in_host_shadow_page(shadow_page_pa, guest_function_pa)); + let shadow_function_pa = PAddr::from(Self::calculate_function_offset_in_host_shadow_page(shadow_page_pa, guest_function_pa)); debug!("Shadow Function PA: {:#x}", shadow_function_pa); debug!("Installing inline hook at shadow function PA: {:#x}", shadow_function_pa.as_u64()); @@ -320,7 +269,8 @@ impl HookManager { } } - let pre_alloc_pt = hook_manager + let pre_alloc_pt = vm + .hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -353,12 +303,7 @@ impl HookManager { /// # Returns /// /// * Returns `Ok(())` if the hook was successfully removed, `Err(HypervisorError)` otherwise. 
- pub fn ept_unhook_function( - hook_manager: &mut HookManager, - vm: &mut Vm, - guest_function_va: u64, - _ept_hook_type: EptHookType, - ) -> Result<(), HypervisorError> { + pub fn ept_unhook_function(vm: &mut Vm, guest_function_va: u64, _ept_hook_type: EptHookType) -> Result<(), HypervisorError> { debug!("Removing EPT hook for function at VA: {:#x}", guest_function_va); let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va)); @@ -370,7 +315,8 @@ impl HookManager { let guest_large_page_pa = guest_function_pa.align_down_to_large_page(); debug!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64()); - let pre_alloc_pt = hook_manager + let pre_alloc_pt = vm + .hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -469,77 +415,4 @@ impl HookManager { instruction_count } - - /// Sets the base address and size of the Windows kernel. - /// - /// # Arguments - /// - /// * `guest_va` - The virtual address of the guest. - /// - /// # Returns - /// - /// * `Ok(())` - The kernel base and size were set successfully. - pub fn set_kernel_base_and_size(&mut self, guest_va: u64) -> Result<(), HypervisorError> { - // Get the base address of ntoskrnl.exe. - self.ntoskrnl_base_va = unsafe { get_image_base_address(guest_va).ok_or(HypervisorError::FailedToGetImageBaseAddress)? }; - - // Get the physical address of ntoskrnl.exe using GUEST_CR3 and the virtual address. - self.ntoskrnl_base_pa = PhysicalAddress::pa_from_va(self.ntoskrnl_base_va); - - // Get the size of ntoskrnl.exe. - self.ntoskrnl_size = unsafe { get_size_of_image(self.ntoskrnl_base_pa as _).ok_or(HypervisorError::FailedToGetKernelSize)? } as u64; - - Ok(()) - } - - /// Manages an EPT hook for a kernel function, enabling or disabling it. - /// - /// # Arguments - /// - /// * `vm` - The virtual machine to install/remove the hook on. - /// * `function_hash` - The hash of the function to hook/unhook. 
- /// * `syscall_number` - The syscall number to use if `get_export_by_hash` fails. - /// * `ept_hook_type` - The type of EPT hook to use. - /// * `enable` - A boolean indicating whether to enable (true) or disable (false) the hook. - /// - /// # Returns - /// - /// * `Ok(())` - The hook was managed successfully. - /// * `Err(HypervisorError)` - If the hook management fails. - pub fn manage_kernel_ept_hook( - hook_manager: &mut HookManager, - vm: &mut Vm, - function_hash: u32, - syscall_number: u16, - ept_hook_type: EptHookType, - enable: bool, - ) -> Result<(), HypervisorError> { - let action = if enable { "Enabling" } else { "Disabling" }; - debug!("{} EPT hook for function: {}", action, function_hash); - - let function_va = unsafe { - if let Some(va) = get_export_by_hash(hook_manager.ntoskrnl_base_pa as _, hook_manager.ntoskrnl_base_va as _, function_hash) { - va - } else { - let ssdt_function_address = SsdtHook::find_ssdt_function_address( - syscall_number as _, - false, - hook_manager.ntoskrnl_base_pa as _, - hook_manager.ntoskrnl_size as _, - ); - match ssdt_function_address { - Ok(ssdt_hook) => ssdt_hook.guest_function_va as *mut u8, - Err(_) => return Err(HypervisorError::FailedToGetExport), - } - } - }; - - if enable { - HookManager::ept_hook_function(hook_manager, vm, function_va as _, function_hash, ept_hook_type)?; - } else { - HookManager::ept_unhook_function(hook_manager, vm, function_va as _, ept_hook_type)?; - } - - Ok(()) - } } diff --git a/hypervisor/src/intel/vm.rs b/hypervisor/src/intel/vm.rs index 8c2a657..7890da0 100644 --- a/hypervisor/src/intel/vm.rs +++ b/hypervisor/src/intel/vm.rs @@ -13,6 +13,7 @@ use { capture::GuestRegisters, descriptor::Descriptors, ept::Ept, + hooks::hook_manager::HookManager, paging::PageTables, support::{vmclear, vmptrld, vmread, vmxon}, vmcs::Vmcs, @@ -61,6 +62,9 @@ pub struct Vm { /// Flag indicating if the VM has been launched. pub has_launched: bool, + + /// The hook manager for the VM. 
+ pub hook_manager: HookManager, } impl Vm { @@ -125,6 +129,9 @@ impl Vm { trace!("Initializing Launch State"); self.has_launched = false; + trace!("Initializing Hook Manager"); + self.hook_manager = HookManager::new()?; + trace!("VM created"); Ok(()) diff --git a/hypervisor/src/intel/vmexit/commands.rs b/hypervisor/src/intel/vmexit/commands.rs index 1571f73..8d3aa7c 100644 --- a/hypervisor/src/intel/vmexit/commands.rs +++ b/hypervisor/src/intel/vmexit/commands.rs @@ -1,10 +1,7 @@ use { crate::intel::{ addresses::PhysicalAddress, - hooks::{ - hook_manager::{EptHookType, HookManager}, - inline::InlineHookType, - }, + hooks::{hook_manager::EptHookType, inline::InlineHookType}, vm::Vm, }, log::*, @@ -38,26 +35,29 @@ pub fn handle_guest_commands(vm: &mut Vm) -> bool { let result = match client_data.command { Commands::EnableKernelEptHook | Commands::DisableKernelEptHook => { let enable = client_data.command == Commands::EnableKernelEptHook; + if let Some(mut kernel_hook) = vm.hook_manager.kernel_hook.take() { + let result = kernel_hook.manage_kernel_ept_hook( + vm, + client_data.function_hash, + client_data.syscall_number, + EptHookType::Function(InlineHookType::Vmcall), + enable, + ); - // Lock the global HookManager once - let mut hook_manager = HookManager::get_hook_manager_mut(); + // Put the kernel hook back in the box + vm.hook_manager.kernel_hook = Some(kernel_hook); - let result = HookManager::manage_kernel_ept_hook( - &mut hook_manager, - vm, - client_data.function_hash, - client_data.syscall_number, - EptHookType::Function(InlineHookType::Vmcall), - enable, - ); - - match result { - Ok(_) => true, - Err(e) => { - let action = if enable { "setup" } else { "disable" }; - error!("Failed to {} kernel EPT hook: {:?}", action, e); - false + match result { + Ok(_) => true, + Err(e) => { + let action = if enable { "setup" } else { "disable" }; + error!("Failed to {} kernel EPT hook: {:?}", action, e); + false + } } + } else { + error!("KernelHook is missing"); + 
false } } Commands::Invalid => { diff --git a/hypervisor/src/intel/vmexit/cpuid.rs b/hypervisor/src/intel/vmexit/cpuid.rs index 4b1d4a1..75046a2 100644 --- a/hypervisor/src/intel/vmexit/cpuid.rs +++ b/hypervisor/src/intel/vmexit/cpuid.rs @@ -5,7 +5,6 @@ use { crate::{ error::HypervisorError, intel::{ - hooks::hook_manager::HookManager, vm::Vm, vmexit::{commands::handle_guest_commands, ExitType}, }, @@ -92,6 +91,8 @@ const PASSWORD: u64 = 0xDEADBEEF; pub fn handle_cpuid(vm: &mut Vm) -> Result { trace!("Handling CPUID VM exit..."); + // const HYPERV_CPUID_LEAF_RANGE: RangeInclusive = 0x40000000..=0x4FFFFFFF; + let leaf = vm.guest_registers.rax as u32; let sub_leaf = vm.guest_registers.rcx as u32; @@ -124,46 +125,46 @@ pub fn handle_cpuid(vm: &mut Vm) -> Result { } leaf if leaf == CpuidLeaf::CacheInformation as u32 => { trace!("CPUID leaf 0x2 detected (Cache Information)."); - - // Lock the global HookManager once - let mut hook_manager = HookManager::get_hook_manager_mut(); - - if !hook_manager.has_cpuid_cache_info_been_called { + if vm.hook_manager.has_cpuid_cache_info_been_called == false { + /* // Test UEFI boot-time hooks - HookManager::manage_kernel_ept_hook( - &mut hook_manager, - vm, - crate::windows::nt::pe::djb2_hash("NtQuerySystemInformation".as_bytes()), - 0x0036, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, - )?; - HookManager::manage_kernel_ept_hook( - &mut hook_manager, - vm, - crate::windows::nt::pe::djb2_hash("NtCreateFile".as_bytes()), - 0x0055, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, - )?; - HookManager::manage_kernel_ept_hook( - &mut hook_manager, - vm, - crate::windows::nt::pe::djb2_hash("NtAllocateVirtualMemory".as_bytes()), - 0x18, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, - )?; - 
HookManager::manage_kernel_ept_hook( - &mut hook_manager, - vm, - crate::windows::nt::pe::djb2_hash("NtQueryInformationProcess".as_bytes()), - 0x19, - crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), - true, - )?; - // Set the flag - hook_manager.has_cpuid_cache_info_been_called = true; + if let Some(mut kernel_hook) = vm.hook_manager.kernel_hook.take() { + kernel_hook.manage_kernel_ept_hook( + vm, + crate::windows::nt::pe::djb2_hash("NtQuerySystemInformation".as_bytes()), + 0x0036, + crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + kernel_hook.manage_kernel_ept_hook( + vm, + crate::windows::nt::pe::djb2_hash("NtCreateFile".as_bytes()), + 0x0055, + crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + kernel_hook.manage_kernel_ept_hook( + vm, + crate::windows::nt::pe::djb2_hash("NtAllocateVirtualMemory".as_bytes()), + 0x18, + crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + kernel_hook.manage_kernel_ept_hook( + vm, + crate::windows::nt::pe::djb2_hash("NtQueryInformationProcess".as_bytes()), + 0x19, + crate::intel::hooks::hook_manager::EptHookType::Function(crate::intel::hooks::inline::InlineHookType::Vmcall), + true, + )?; + // Place the kernel hook back in the box + vm.hook_manager.kernel_hook = Some(kernel_hook); + // Set the flag + vm.hook_manager.has_cpuid_cache_info_been_called = true; + } else { + return Err(HypervisorError::KernelHookMissing); + } + */ } } leaf if leaf == CpuidLeaf::ExtendedFeatureInformation as u32 => { diff --git a/hypervisor/src/intel/vmexit/ept.rs b/hypervisor/src/intel/vmexit/ept.rs index 01cfe49..9b74e3e 100644 --- a/hypervisor/src/intel/vmexit/ept.rs +++ b/hypervisor/src/intel/vmexit/ept.rs @@ -3,7 +3,6 @@ use { error::HypervisorError, intel::{ 
ept::AccessType, - hooks::hook_manager::HookManager, support::vmread, vm::Vm, vmerror::EptViolationExitQualification, @@ -32,23 +31,22 @@ pub fn handle_ept_violation(vm: &mut Vm) -> Result { let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Faulting Guest Large Page PA: {:#x}", guest_large_page_pa); - let mut hook_manager = HookManager::get_hook_manager_mut(); - - // dump_primary_ept_entries(vm, guest_pa, &mut hook_manager)?; - let shadow_page_pa = PAddr::from( - hook_manager + vm.hook_manager .memory_manager .get_shadow_page_as_ptr(guest_page_pa.as_u64()) .ok_or(HypervisorError::ShadowPageNotFound)?, ); trace!("Shadow Page PA: {:#x}", shadow_page_pa.as_u64()); - let pre_alloc_pt = hook_manager + let pre_alloc_pt = vm + .hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; + // dump_primary_ept_entries(vm, guest_pa)?; + let exit_qualification_value = vmread(vmcs::ro::EXIT_QUALIFICATION); let ept_violation_qualification = EptViolationExitQualification::from_exit_qualification(exit_qualification_value); trace!("Exit Qualification for EPT Violations: {:#?}", ept_violation_qualification); @@ -74,14 +72,14 @@ pub fn handle_ept_violation(vm: &mut Vm) -> Result { // We make this read-write-execute to allow the instruction performing a read-write // operation and then switch back to execute-only shadow page from handle_mtf vmexit - hook_manager.mtf_counter = Some(1); + vm.hook_manager.mtf_counter = Some(1); // Set the monitor trap flag and initialize counter to the number of overwritten instructions set_monitor_trap_flag(true); // Ensure all data mutations to vm are done before calling this. 
// This function will update the guest interrupt flag to prevent interrupts while single-stepping - update_guest_interrupt_flag(vm, &mut hook_manager, false)?; + update_guest_interrupt_flag(vm, false)?; } trace!("EPT Violation handled successfully!"); @@ -113,10 +111,8 @@ pub fn handle_ept_misconfiguration(vm: &mut Vm) -> Result Result Result<(), HypervisorError> { +pub fn dump_primary_ept_entries(vm: &mut Vm, faulting_guest_pa: u64) -> Result<(), HypervisorError> { // Log the critical error information. trace!("Faulting guest address: {:#x}", faulting_guest_pa); @@ -151,7 +147,8 @@ pub fn dump_primary_ept_entries(vm: &mut Vm, faulting_guest_pa: u64, hook_manage // Get the primary EPTs. let primary_ept = &mut vm.primary_ept; - let pre_alloc_pt = hook_manager + let pre_alloc_pt = vm + .hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; diff --git a/hypervisor/src/intel/vmexit/msr.rs b/hypervisor/src/intel/vmexit/msr.rs index 3ad28bd..7edf8bd 100644 --- a/hypervisor/src/intel/vmexit/msr.rs +++ b/hypervisor/src/intel/vmexit/msr.rs @@ -13,7 +13,6 @@ use { intel::{ bitmap::{MsrAccessType, MsrOperation}, events::EventInjection, - hooks::hook_manager::HookManager, support::{rdmsr, wrmsr}, vm::Vm, vmexit::ExitType, @@ -104,8 +103,15 @@ pub fn handle_msr_access(vm: &mut Vm, access_type: MsrAccessType) -> Result Result { trace!("Handling Monitor Trap Flag exit."); - let mut hook_manager = HookManager::get_hook_manager_mut(); - - if let Some(counter) = hook_manager.mtf_counter.as_mut() { + if let Some(counter) = vm.hook_manager.mtf_counter.as_mut() { trace!("Guest RIP: {:#x}", vm.guest_registers.rip); trace!("MTF counter before decrement: {}", *counter); *counter = counter.saturating_sub(1); // Safely decrement the counter @@ -50,14 +47,15 @@ pub fn handle_monitor_trap_flag(vm: &mut Vm) -> Result Result`: Ok if successful, Err if an error occurred during VMCS read/write operations. 
-pub fn update_guest_interrupt_flag(vm: &mut Vm, hook_manager: &mut HookManager, enable: bool) -> Result<(), HypervisorError> { +pub fn update_guest_interrupt_flag(vm: &mut Vm, enable: bool) -> Result<(), HypervisorError> { trace!("Updating guest interrupt flag..."); // Retrieve the current RFLAGS from the VMCS guest state area @@ -118,7 +115,7 @@ pub fn update_guest_interrupt_flag(vm: &mut Vm, hook_manager: &mut HookManager, trace!("Current guest RFLAGS before update: {:#x}", current_rflags_bits); // Optionally save the current RFLAGS to old_rflags before modification - hook_manager.old_rflags = Some(current_rflags_bits); + vm.hook_manager.old_rflags = Some(current_rflags_bits); // Set or clear the Interrupt Flag based on the 'enable' parameter if enable { @@ -141,12 +138,11 @@ pub fn update_guest_interrupt_flag(vm: &mut Vm, hook_manager: &mut HookManager, /// /// # Parameters /// * `vm`: A mutable reference to the virtual machine instance. -/// * `hook_manager`: A mutable reference to the hook manager instance. /// /// # Returns /// * `Result<(), HypervisorError>`: Ok if successful, Err if an error occurred during VMCS read/write operations. 
-pub fn restore_guest_interrupt_flag(vm: &mut Vm, hook_manager: &mut HookManager) -> Result<(), HypervisorError> { - if let Some(old_rflags_bits) = hook_manager.old_rflags { +pub fn restore_guest_interrupt_flag(vm: &mut Vm) -> Result<(), HypervisorError> { + if let Some(old_rflags_bits) = vm.hook_manager.old_rflags { trace!("Restoring guest RFLAGS to old value: {:#x}", old_rflags_bits); // Update VM register state first diff --git a/hypervisor/src/intel/vmexit/vmcall.rs b/hypervisor/src/intel/vmexit/vmcall.rs index 8ef2dec..992688e 100644 --- a/hypervisor/src/intel/vmexit/vmcall.rs +++ b/hypervisor/src/intel/vmexit/vmcall.rs @@ -59,10 +59,8 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { let guest_large_page_pa = guest_page_pa.align_down_to_large_page(); trace!("Guest Large Page PA: {:#x}", guest_large_page_pa.as_u64()); - let mut hook_manager = HookManager::get_hook_manager_mut(); - // Set the current hook to the EPT hook for handling MTF exit - let exit_type = if let Some(shadow_page_pa) = hook_manager.memory_manager.get_shadow_page_as_ptr(guest_page_pa.as_u64()) { + let exit_type = if let Some(shadow_page_pa) = vm.hook_manager.memory_manager.get_shadow_page_as_ptr(guest_page_pa.as_u64()) { trace!("Shadow Page PA: {:#x}", shadow_page_pa); trace!("Executing VMCALL hook on shadow page for EPT hook at PA: {:#x} with VA: {:#x}", guest_function_pa, vm.guest_registers.rip); @@ -71,7 +69,8 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { // crate::windows::log::log_nt_open_process_params(&vm.guest_registers); // crate::windows::log::log_mm_is_address_valid_params(&vm.guest_registers); - let pre_alloc_pt = hook_manager + let pre_alloc_pt = vm + .hook_manager .memory_manager .get_page_table_as_mut(guest_large_page_pa.as_u64()) .ok_or(HypervisorError::PageTableNotFound)?; @@ -80,7 +79,8 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { vm.primary_ept .swap_page(guest_page_pa.as_u64(), guest_page_pa.as_u64(), AccessType::READ_WRITE_EXECUTE, pre_alloc_pt)?; - let hook_info = 
hook_manager + let hook_info = vm + .hook_manager .memory_manager .get_hook_info_by_function_pa(guest_page_pa.as_u64(), guest_function_pa.as_u64()) .ok_or(HypervisorError::HookInfoNotFound)?; @@ -88,16 +88,17 @@ pub fn handle_vmcall(vm: &mut Vm) -> Result { debug!("Hook info: {:#x?}", hook_info); // Calculate the number of instructions in the function to set the MTF counter for restoring overwritten instructions by single-stepping. + // (NOTE: CHANGE HOOK SIZE IF YOU MOVE THIS INTO CPUID OR INT3) let instruction_count = unsafe { HookManager::calculate_instruction_count(guest_function_pa.as_u64(), HookManager::hook_size(hook_info.ept_hook_type)) as u64 }; - hook_manager.mtf_counter = Some(instruction_count); + vm.hook_manager.mtf_counter = Some(instruction_count); // Set the monitor trap flag and initialize counter to the number of overwritten instructions set_monitor_trap_flag(true); // Ensure all data mutations to vm are done before calling this. // This function will update the guest interrupt flag to prevent interrupts while single-stepping - update_guest_interrupt_flag(vm, &mut hook_manager, false)?; + update_guest_interrupt_flag(vm, false)?; Ok(ExitType::Continue) } else { diff --git a/hypervisor/src/windows/kernel.rs b/hypervisor/src/windows/kernel.rs new file mode 100644 index 0000000..8433818 --- /dev/null +++ b/hypervisor/src/windows/kernel.rs @@ -0,0 +1,114 @@ +use { + crate::{ + error::HypervisorError, + intel::{ + addresses::PhysicalAddress, + hooks::hook_manager::{EptHookType, HookManager}, + vm::Vm, + }, + windows::{ + nt::pe::{get_export_by_hash, get_image_base_address, get_size_of_image}, + ssdt::ssdt_hook::SsdtHook, + }, + }, + log::*, +}; + +/// Represents a hook into the Windows kernel, allowing redirection of functions and syscalls. +#[derive(Debug, Clone)] +#[allow(dead_code)] +pub struct KernelHook { + /// The base virtual address of ntoskrnl.exe. + ntoskrnl_base_va: u64, + + /// The base physical address of ntoskrnl.exe. 
+ ntoskrnl_base_pa: u64, + + /// The size of ntoskrnl.exe. + ntoskrnl_size: u64, +} + +impl KernelHook { + /// Creates a new instance of `KernelHook`. + /// + /// # Returns + /// + /// * `Ok(Self)` - The new instance of `KernelHook`. + pub fn new() -> Result { + trace!("Initializing kernel hook"); + Ok(Self { + ntoskrnl_base_va: 0, + ntoskrnl_base_pa: 0, + ntoskrnl_size: 0, + }) + } + + /// Sets the base address and size of the Windows kernel. + /// + /// # Arguments + /// + /// * `guest_va` - The virtual address of the guest. + /// + /// # Returns + /// + /// * `Ok(())` - The kernel base and size were set successfully. + pub fn set_kernel_base_and_size(&mut self, guest_va: u64) -> Result<(), HypervisorError> { + // Get the base address of ntoskrnl.exe. + self.ntoskrnl_base_va = unsafe { get_image_base_address(guest_va).ok_or(HypervisorError::FailedToGetImageBaseAddress)? }; + + // Get the physical address of ntoskrnl.exe using GUEST_CR3 and the virtual address. + self.ntoskrnl_base_pa = PhysicalAddress::pa_from_va(self.ntoskrnl_base_va); + + // Get the size of ntoskrnl.exe. + self.ntoskrnl_size = unsafe { get_size_of_image(self.ntoskrnl_base_pa as _).ok_or(HypervisorError::FailedToGetKernelSize)? } as u64; + + Ok(()) + } + + /// Manages an EPT hook for a kernel function, enabling or disabling it. + /// + /// # Arguments + /// + /// * `vm` - The virtual machine to install/remove the hook on. + /// * `function_hash` - The hash of the function to hook/unhook. + /// * `syscall_number` - The syscall number to use if `get_export_by_hash` fails. + /// * `ept_hook_type` - The type of EPT hook to use. + /// * `enable` - A boolean indicating whether to enable (true) or disable (false) the hook. + /// + /// # Returns + /// + /// * `Ok(())` - The hook was managed successfully. + /// * `Err(HypervisorError)` - If the hook management fails. 
+ pub fn manage_kernel_ept_hook( + &mut self, + vm: &mut Vm, + function_hash: u32, + syscall_number: u16, + ept_hook_type: EptHookType, + enable: bool, + ) -> Result<(), HypervisorError> { + let action = if enable { "Enabling" } else { "Disabling" }; + debug!("{} EPT hook for function: {}", action, function_hash); + + let function_va = unsafe { + if let Some(va) = get_export_by_hash(self.ntoskrnl_base_pa as _, self.ntoskrnl_base_va as _, function_hash) { + va + } else { + let ssdt_function_address = + SsdtHook::find_ssdt_function_address(syscall_number as _, false, self.ntoskrnl_base_pa as _, self.ntoskrnl_size as _); + match ssdt_function_address { + Ok(ssdt_hook) => ssdt_hook.guest_function_va as *mut u8, + Err(_) => return Err(HypervisorError::FailedToGetExport), + } + } + }; + + if enable { + HookManager::ept_hook_function(vm, function_va as _, function_hash, ept_hook_type)?; + } else { + HookManager::ept_unhook_function(vm, function_va as _, ept_hook_type)?; + } + + Ok(()) + } +} diff --git a/hypervisor/src/windows/mod.rs b/hypervisor/src/windows/mod.rs index 27d968e..d88426d 100644 --- a/hypervisor/src/windows/mod.rs +++ b/hypervisor/src/windows/mod.rs @@ -1,3 +1,4 @@ +pub mod kernel; pub mod log; pub mod nt; pub mod ssdt; diff --git a/uefi/Cargo.toml b/uefi/Cargo.toml index 9c6ba2b..bc9107b 100644 --- a/uefi/Cargo.toml +++ b/uefi/Cargo.toml @@ -16,5 +16,4 @@ log = { version = "0.4.20", default-features = false } # https://crates.io/crate once_cell = "1.19.0" # https://crates.io/crates/once_cell spin = "0.9" # https://crates.io/crates/spin com_logger = "0.1.1" # https://crates.io/crates/com_logger -lazy_static = { version = "1.4.0", features = ["spin_no_std"] } # https://crates.io/crates/lazy_static hypervisor = { path = "../hypervisor" } \ No newline at end of file diff --git a/uefi/src/setup.rs b/uefi/src/setup.rs index 85a755c..598086e 100644 --- a/uefi/src/setup.rs +++ b/uefi/src/setup.rs @@ -3,12 +3,18 @@ //! physical to virtual addressing. 
This is useful for ensuring a stable memory layout in hypervisor development. use { - hypervisor::{allocator::record_allocation, intel::hooks::hook_manager::GLOBAL_HOOK_MANAGER}, + alloc::boxed::Box, + core::sync::atomic::Ordering, + hypervisor::{ + allocator::{box_zeroed, record_allocation}, + intel::{hooks::hook_manager::DUMMY_PAGE_ADDRESS, page::Page}, + }, log::debug, uefi::{prelude::BootServices, proto::loaded_image::LoadedImage}, }; -/// Sets up the hypervisor by recording the image base, nullifying the relocation table, and initializing the global hook manager. +/// Sets up the hypervisor by recording the image base, creating a dummy page, +/// and nullifying the relocation table. /// /// # Arguments /// @@ -20,9 +26,9 @@ use { pub fn setup(boot_services: &BootServices) -> uefi::Result<()> { let loaded_image = boot_services.open_protocol_exclusive::(boot_services.image_handle())?; record_image_base(&loaded_image); + create_dummy_page(0xFF); let image_base = loaded_image.info().0 as u64; zap_relocations(image_base); - lazy_static::initialize(&GLOBAL_HOOK_MANAGER); Ok(()) } @@ -41,6 +47,21 @@ pub fn record_image_base(loaded_image: &LoadedImage) { record_allocation(image_base as usize, image_size as usize); } +/// Creates a dummy page filled with a specific byte value. +/// +/// This function allocates a page of memory and fills it with a specified byte value. +/// The address of the dummy page is stored in a global variable for access by multiple cores/threads/processors. +/// +/// # Arguments +/// +/// * `fill_byte` - The byte value to fill the page with. +pub fn create_dummy_page(fill_byte: u8) { + let mut dummy_page = unsafe { box_zeroed::() }; + dummy_page.0.iter_mut().for_each(|byte| *byte = fill_byte); + let dummy_page_pa = Box::into_raw(dummy_page) as u64; + DUMMY_PAGE_ADDRESS.store(dummy_page_pa, Ordering::SeqCst); +} + /// Nullifies the relocation table of the loaded UEFI image to prevent relocation. 
/// /// This function modifies the loaded image's PE header to zero out the relocation table, From dd43bc319739dd94a48be41d52eb0a9038894bc6 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 13:28:22 +1200 Subject: [PATCH 82/87] deadlock temp fix --- uefi/src/stack.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/uefi/src/stack.rs b/uefi/src/stack.rs index d14470c..e7a6492 100644 --- a/uefi/src/stack.rs +++ b/uefi/src/stack.rs @@ -85,7 +85,7 @@ pub unsafe fn allocate_host_stack(layout: Layout) -> *mut u8 { // use `allocate_pool` directly. boot_services.allocate_pool(memory_type, size).map(|ptr| ptr).unwrap_or(ptr::null_mut()) }; - record_allocation(stack as usize, layout.size()); + // record_allocation(stack as usize, layout.size()); // This will cause a deadlock stack } From 8299d62ccb83f492301a9470e434cf17bb9a86c2 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 18:11:55 +1200 Subject: [PATCH 83/87] Fixed memory stack allocator & memory tracker - Testing --- hypervisor/src/allocator.rs | 58 +--------------- hypervisor/src/global_const.rs | 28 ++++++-- hypervisor/src/intel/hooks/hook_manager.rs | 28 +++++--- hypervisor/src/lib.rs | 1 + hypervisor/src/tracker.rs | 81 ++++++++++++++++++++++ uefi/src/setup.rs | 3 +- uefi/src/stack.rs | 6 +- uefi/src/virtualize.rs | 4 +- 8 files changed, 137 insertions(+), 72 deletions(-) create mode 100644 hypervisor/src/tracker.rs diff --git a/hypervisor/src/allocator.rs b/hypervisor/src/allocator.rs index 5064fc0..db0e028 100644 --- a/hypervisor/src/allocator.rs +++ b/hypervisor/src/allocator.rs @@ -4,12 +4,11 @@ //! debugging information. 
use { - crate::global_const::HEAP_SIZE, - alloc::{boxed::Box, vec::Vec}, + crate::global_const::TOTAL_HEAP_SIZE, + alloc::boxed::Box, core::{ alloc::{GlobalAlloc, Layout}, ptr, - sync::atomic::{AtomicUsize, Ordering}, }, log::debug, spin::Mutex, @@ -17,7 +16,7 @@ use { /// Global allocator instance with a heap size of `HEAP_SIZE`. #[global_allocator] -pub static mut HEAP: ListHeap = ListHeap::new(); +pub static mut HEAP: ListHeap = ListHeap::new(); /// Initializes the linked list heap. pub unsafe fn heap_init() { @@ -332,54 +331,3 @@ unsafe impl GlobalAlloc for ListHeap { pub unsafe fn box_zeroed() -> Box { unsafe { Box::::new_zeroed().assume_init() } } - -/// Structure to store allocated memory ranges. -/// -/// This struct is used to keep track of memory allocations by storing the -/// start address and size of each allocated memory block. -#[derive(Debug)] -pub struct MemoryRange { - /// The start address of the allocated memory range. - pub start: usize, - /// The size of the allocated memory range. - pub size: usize, -} - -/// Global list to store allocated memory ranges. -/// -/// This global mutex-protected vector keeps track of all allocated memory ranges -/// for monitoring and debugging purposes. -pub static ALLOCATED_MEMORY: Mutex> = Mutex::new(Vec::new()); - -/// Atomic counter to track the total allocated memory size. -/// -/// This atomic counter is incremented whenever a new memory block is allocated -/// and provides a quick way to get the total allocated memory size. -static TOTAL_ALLOCATED_MEMORY: AtomicUsize = AtomicUsize::new(0); - -/// Records an allocation by adding the memory range to the global list and updating the total allocated memory. -/// -/// This function is called whenever a new memory block is allocated. It stores the start address -/// and size of the allocated memory in the global list and updates the total allocated memory counter. -/// -/// # Arguments -/// -/// * `start` - The start address of the allocated memory range. 
-/// * `size` - The size of the allocated memory range. -pub fn record_allocation(start: usize, size: usize) { - let mut allocated_memory = ALLOCATED_MEMORY.lock(); - allocated_memory.push(MemoryRange { start, size }); - TOTAL_ALLOCATED_MEMORY.fetch_add(size, Ordering::SeqCst); -} - -/// Prints the tracked memory allocations. -/// -/// This function iterates over all recorded memory allocations and prints the start address -/// and size of each allocated memory range. It also prints the total allocated memory size. -pub fn print_tracked_allocations() { - let allocated_memory = ALLOCATED_MEMORY.lock(); - for range in allocated_memory.iter() { - debug!("Allocated memory range: start = {:#x}, size = {:#x}", range.start, range.size); - } - debug!("Total allocated memory: {:#x} bytes", TOTAL_ALLOCATED_MEMORY.load(Ordering::SeqCst)); -} diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 82cfbe7..5718f16 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -1,5 +1,25 @@ -/// The size of the heap in bytes. -pub const HEAP_SIZE: usize = 0x180000; +/// The default number of logical processors for a high-end desktop system. +/// +/// This value is set to 1 for testing purposes but can be adjusted up to 32 or more based on the system. +/// Adjusting this value will increase the total heap size accordingly. +const DEFAULT_LOGICAL_PROCESSORS: usize = 1; -/// The size of the stack in bytes. -pub const STACK_NUMBER_OF_PAGES: usize = 0x300; +/// The total size of the heap in bytes, shared among all processors. +/// +/// This base heap size is for 1 processor, calculated as: +/// 64 * 1024 * 1024 = 67,108,864 bytes (64 MB) +/// +/// For 32 processors, the heap size would be: +/// 64 * 1024 * 1024 * 32 = 2,147,483,648 bytes (2 GB) +/// +/// By adjusting the number of logical processors, the heap size will scale accordingly. 
+pub const TOTAL_HEAP_SIZE: usize = 64 * 1024 * 1024 * DEFAULT_LOGICAL_PROCESSORS; + +/// The number of pages for the stack per processor/core. +/// +/// Each processor/core gets its own stack. The default stack size per processor is calculated as: +/// STACK_PAGES_PER_PROCESSOR * BASE_PAGE_SIZE (4096 bytes per page) +/// 0x100 * 4096 = 1,048,576 bytes (1 MB) +/// +/// This stack size is allocated individually for each processor. +pub const STACK_PAGES_PER_PROCESSOR: usize = 0x100; diff --git a/hypervisor/src/intel/hooks/hook_manager.rs b/hypervisor/src/intel/hooks/hook_manager.rs index c924929..8c3ed8e 100644 --- a/hypervisor/src/intel/hooks/hook_manager.rs +++ b/hypervisor/src/intel/hooks/hook_manager.rs @@ -1,6 +1,5 @@ use { crate::{ - allocator::{print_tracked_allocations, ALLOCATED_MEMORY}, error::HypervisorError, intel::{ addresses::PhysicalAddress, @@ -13,6 +12,7 @@ use { invvpid::invvpid_all_contexts, vm::Vm, }, + tracker::{print_allocated_memory, ALLOCATED_MEMORY_HEAD}, windows::kernel::KernelHook, }, core::{ @@ -101,17 +101,29 @@ impl HookManager { /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise. pub fn hide_hypervisor_memory(vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> { // Print the tracked memory allocations for debugging purposes. - print_tracked_allocations(); + print_allocated_memory(); - // Lock the allocated memory list to ensure thread safety. - let allocated_memory = ALLOCATED_MEMORY.lock(); + // Load the head of the allocated memory list. + let mut current_node = ALLOCATED_MEMORY_HEAD.load(Ordering::Acquire); - // Iterate through the recorded memory allocations and hide each page. - for range in allocated_memory.iter() { - for offset in (0..range.size).step_by(BASE_PAGE_SIZE) { - let guest_page_pa = range.start + offset; + // Iterate through the linked list and hide each memory range. + while !current_node.is_null() { + // Get a reference to the current node. 
+ let node = unsafe { &*current_node }; + + // Print the memory range. + trace!("Memory Range: Start = {:#X}, Size = {}", node.start, node.size); + + // Iterate through the memory range in 4KB steps. + for offset in (0..node.size).step_by(BASE_PAGE_SIZE) { + let guest_page_pa = node.start + offset; + // Print the page address before hiding it. + trace!("Hiding memory page at: {:#X}", guest_page_pa); HookManager::ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?; } + + // Move to the next node. + current_node = node.next.load(Ordering::Acquire); } Ok(()) diff --git a/hypervisor/src/lib.rs b/hypervisor/src/lib.rs index 5c5cbc9..ce6c9d8 100644 --- a/hypervisor/src/lib.rs +++ b/hypervisor/src/lib.rs @@ -18,5 +18,6 @@ pub mod error; pub mod global_const; pub mod intel; pub mod logger; +pub mod tracker; pub mod vmm; pub mod windows; diff --git a/hypervisor/src/tracker.rs b/hypervisor/src/tracker.rs new file mode 100644 index 0000000..262ffaa --- /dev/null +++ b/hypervisor/src/tracker.rs @@ -0,0 +1,81 @@ +use { + alloc::boxed::Box, + core::{ + ptr::null_mut, + sync::atomic::{AtomicPtr, Ordering}, + }, + log::trace, +}; + +/// Structure to represent a memory range. +/// +/// This struct holds the start address and size of an allocated memory range. +/// It also includes an atomic pointer to the next memory range in a linked list. +#[derive(Debug)] +pub struct MemoryRangeTracker { + pub start: usize, + pub size: usize, + pub next: AtomicPtr, +} + +/// Global atomic pointer to the head of the allocated memory list. +/// +/// This static variable holds the head of the linked list that keeps track of all allocated memory ranges. +/// It is initialized to a null pointer. +pub static ALLOCATED_MEMORY_HEAD: AtomicPtr = AtomicPtr::new(null_mut()); + +/// Records an allocation by adding the memory range to the global list. +/// +/// This function is called whenever a new memory block is allocated. 
It stores the start address +/// and size of the allocated memory in the global list. +/// +/// # Arguments +/// +/// * `start` - The start address of the allocated memory range. +/// * `size` - The size of the allocated memory range. +pub fn record_allocation(start: usize, size: usize) { + // Create a new memory range node. + let new_node = Box::into_raw(Box::new(MemoryRangeTracker { + start, + size, + next: AtomicPtr::new(null_mut()), + })); + + // Update the head of the list in a lock-free manner. + let mut current_head = ALLOCATED_MEMORY_HEAD.load(Ordering::Acquire); + loop { + // Set the new node's next pointer to the current head. + unsafe { (*new_node).next.store(current_head, Ordering::Release) }; + + // Attempt to update the head to the new node. + match ALLOCATED_MEMORY_HEAD.compare_exchange(current_head, new_node, Ordering::AcqRel, Ordering::Acquire) { + // If the head was successfully updated, break out of the loop. + Ok(_) => break, + // If the head was changed by another thread, update current_head and retry. + Err(head) => current_head = head, + } + } +} + +/// Prints the entire allocated memory range one by one. +/// +/// This function iterates through the linked list of allocated memory ranges +/// and prints the start address and size of each range. +pub fn print_allocated_memory() { + // Load the head of the allocated memory list. + let mut current_node = ALLOCATED_MEMORY_HEAD.load(Ordering::Acquire); + + // Iterate through the linked list and print each memory range. + while !current_node.is_null() { + unsafe { + // Get a reference to the current node. + let node = &*current_node; + + // Print the memory range. + trace!("Memory Range: Start = {:#X}, Size = {}", node.start, node.size); + + // Move to the next node. 
+ current_node = node.next.load(Ordering::Acquire); + } + } +} diff --git a/uefi/src/setup.rs b/uefi/src/setup.rs index 598086e..819f4a1 100644 --- a/uefi/src/setup.rs +++ b/uefi/src/setup.rs @@ -6,8 +6,9 @@ use { alloc::boxed::Box, core::sync::atomic::Ordering, hypervisor::{ - allocator::{box_zeroed, record_allocation}, + allocator::box_zeroed, intel::{hooks::hook_manager::DUMMY_PAGE_ADDRESS, page::Page}, + tracker::record_allocation, }, log::debug, uefi::{prelude::BootServices, proto::loaded_image::LoadedImage}, diff --git a/uefi/src/stack.rs b/uefi/src/stack.rs index e7a6492..b6c1f8f 100644 --- a/uefi/src/stack.rs +++ b/uefi/src/stack.rs @@ -5,7 +5,7 @@ use { ptr, sync::atomic::{AtomicPtr, AtomicU32, Ordering}, }, - hypervisor::allocator::record_allocation, + hypervisor::tracker::record_allocation, uefi::{ prelude::{Boot, BootServices, SystemTable}, proto::loaded_image::LoadedImage, @@ -85,7 +85,9 @@ pub unsafe fn allocate_host_stack(layout: Layout) -> *mut u8 { // use `allocate_pool` directly. boot_services.allocate_pool(memory_type, size).map(|ptr| ptr).unwrap_or(ptr::null_mut()) }; - // record_allocation(stack as usize, layout.size()); // This will cause a deadlock + + // Record the allocation without causing a deadlock. + record_allocation(stack as usize, layout.size()); stack } diff --git a/uefi/src/virtualize.rs b/uefi/src/virtualize.rs index ca55b9e..ebf0513 100644 --- a/uefi/src/virtualize.rs +++ b/uefi/src/virtualize.rs @@ -7,7 +7,7 @@ use { crate::stack::allocate_host_stack, core::{alloc::Layout, arch::global_asm, intrinsics::copy_nonoverlapping}, hypervisor::{ - global_const::STACK_NUMBER_OF_PAGES, + global_const::STACK_PAGES_PER_PROCESSOR, intel::{capture::GuestRegisters, page::Page}, vmm::start_hypervisor, }, @@ -22,7 +22,7 @@ use { pub fn virtualize_system(guest_registers: &GuestRegisters) -> ! 
{ debug!("Allocating stack space for host"); - let layout = Layout::array::(STACK_NUMBER_OF_PAGES).unwrap(); + let layout = Layout::array::(STACK_PAGES_PER_PROCESSOR).unwrap(); let stack = unsafe { allocate_host_stack(layout) }; let size = layout.size(); From 2aa69c37c912851d565eccd31f7db5e76950b285 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 18:16:41 +1200 Subject: [PATCH 84/87] Update global_const.rs --- hypervisor/src/global_const.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 5718f16..52900b4 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -19,7 +19,10 @@ pub const TOTAL_HEAP_SIZE: usize = 64 * 1024 * 1024 * DEFAULT_LOGICAL_PROCESSORS /// /// Each processor/core gets its own stack. The default stack size per processor is calculated as: /// STACK_PAGES_PER_PROCESSOR * BASE_PAGE_SIZE (4096 bytes per page) -/// 0x100 * 4096 = 1,048,576 bytes (1 MB) +/// 0x4000 * 4096 = 67,108,864 bytes (64 MB) /// /// This stack size is allocated individually for each processor. -pub const STACK_PAGES_PER_PROCESSOR: usize = 0x100; +pub const STACK_PAGES_PER_PROCESSOR: usize = 0x4000; + +/// The size of each memory page in bytes. 
+pub const BASE_PAGE_SIZE: usize = 4096; From 64e8c0be3f7b4285470a943a78c24a5d667239cd Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 18:18:00 +1200 Subject: [PATCH 85/87] Update global_const.rs --- hypervisor/src/global_const.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 52900b4..09e2ff8 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -23,6 +23,3 @@ pub const TOTAL_HEAP_SIZE: usize = 64 * 1024 * 1024 * DEFAULT_LOGICAL_PROCESSORS /// /// This stack size is allocated individually for each processor. pub const STACK_PAGES_PER_PROCESSOR: usize = 0x4000; - -/// The size of each memory page in bytes. -pub const BASE_PAGE_SIZE: usize = 4096; From 99551cab388a4f77653ce22db238b561284973ad Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 18:50:19 +1200 Subject: [PATCH 86/87] Update global_const.rs --- hypervisor/src/global_const.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 09e2ff8..6b3b9e6 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -2,18 +2,21 @@ /// /// This value is set to 1 for testing purposes but can be adjusted up to 32 or more based on the system. /// Adjusting this value will increase the total heap size accordingly. -const DEFAULT_LOGICAL_PROCESSORS: usize = 1; +const DEFAULT_LOGICAL_PROCESSORS: usize = 32; /// The total size of the heap in bytes, shared among all processors. 
/// /// This base heap size is for 1 processor, calculated as: -/// 64 * 1024 * 1024 = 67,108,864 bytes (64 MB) +/// 16 * 1024 * 1024 = 16,777,216 bytes (16 MB) +/// +/// For 16 processors, the heap size would be: +/// 16 * 1024 * 1024 * 16 = 268,435,456 bytes (256 MB) /// /// For 32 processors, the heap size would be: -/// 64 * 1024 * 1024 * 32 = 2,147,483,648 bytes (2 GB) +/// 16 * 1024 * 1024 * 32 = 536,870,912 bytes (512 MB) /// /// By adjusting the number of logical processors, the heap size will scale accordingly. -pub const TOTAL_HEAP_SIZE: usize = 64 * 1024 * 1024 * DEFAULT_LOGICAL_PROCESSORS; +pub const TOTAL_HEAP_SIZE: usize = 16 * 1024 * 1024 * DEFAULT_LOGICAL_PROCESSORS; /// The number of pages for the stack per processor/core. /// From 510f8393d7bd95c71e6c4bae8d42113956c85074 Mon Sep 17 00:00:00 2001 From: memN0ps <89628341+memN0ps@users.noreply.github.com> Date: Mon, 24 Jun 2024 19:30:07 +1200 Subject: [PATCH 87/87] Update global_const.rs --- hypervisor/src/global_const.rs | 54 ++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/hypervisor/src/global_const.rs b/hypervisor/src/global_const.rs index 6b3b9e6..e6b35cc 100644 --- a/hypervisor/src/global_const.rs +++ b/hypervisor/src/global_const.rs @@ -1,28 +1,52 @@ /// The default number of logical processors for a high-end desktop system. /// -/// This value is set to 1 for testing purposes but can be adjusted up to 32 or more based on the system. +/// This value is set to 1 for testing purposes but can be adjusted up to 64 or more based on the system. /// Adjusting this value will increase the total heap size accordingly. -const DEFAULT_LOGICAL_PROCESSORS: usize = 32; +const DEFAULT_LOGICAL_PROCESSORS: usize = 8; + +/// The number of pages for the stack per processor/core. +/// +/// Each processor/core gets its own stack. 
The default stack size per processor is calculated as: +/// STACK_PAGES_PER_PROCESSOR * BASE_PAGE_SIZE (4096 bytes per page)
+/// 0x4000 * 4096 = 67,108,864 bytes (64 MB)
+///
+/// This stack size is allocated individually for each processor.
+pub const STACK_PAGES_PER_PROCESSOR: usize = 0x4000;
+
+/// The size of a page table in bytes.
+const PAGE_TABLE_SIZE: usize = 2 * 1024 * 1024; // 2 MB
+
+/// The total number of page tables needed per processor to split the stack.
+///
+/// This is calculated as:
+/// STACK_SIZE / PAGE_TABLE_SIZE
+/// 64 MB / 2 MB = 32 page tables
+const PAGE_TABLES_PER_PROCESSOR: usize = 32;
+
+/// The padding added to the heap size for other allocations (e.g., vectors, boxes).
+///
+/// This is an additional memory buffer to ensure there's enough space for other dynamic allocations.
+const HEAP_PADDING: usize = 8 * 1024 * 1024; // 8 MB /// The total size of the heap in bytes, shared among all processors. /// /// This base heap size is for 1 processor, calculated as: -/// 16 * 1024 * 1024 = 16,777,216 bytes (16 MB) +/// 32 * 2 * 1024 * 1024 + 8 * 1024 * 1024 = 75,497,472 bytes (72 MB) +/// +/// For 4 processors, the heap size would be: +/// (32 * 2 * 1024 * 1024 * 4) + 8 * 1024 * 1024 = 276,824,064 bytes (264 MB) +/// +/// For 8 processors, the heap size would be: +/// (32 * 2 * 1024 * 1024 * 8) + 8 * 1024 * 1024 = 545,259,520 bytes (520 MB) /// /// For 16 processors, the heap size would be: -/// 16 * 1024 * 1024 * 16 = 268,435,456 bytes (256 MB) +/// (32 * 2 * 1024 * 1024 * 16) + 8 * 1024 * 1024 = 1,082,130,432 bytes (1.01 GB) /// /// For 32 processors, the heap size would be: -/// 16 * 1024 * 1024 * 32 = 536,870,912 bytes (512 MB) +/// (32 * 2 * 1024 * 1024 * 32) + 8 * 1024 * 1024 = 2,155,872,256 bytes (2.01 GB) /// -/// By adjusting the number of logical processors, the heap size will scale accordingly. 
-pub const TOTAL_HEAP_SIZE: usize = 16 * 1024 * 1024 * DEFAULT_LOGICAL_PROCESSORS;
-
-/// The number of pages for the stack per processor/core.
+/// For 64 processors, the heap size would be:
+/// (32 * 2 * 1024 * 1024 * 64) + 8 * 1024 * 1024 = 4,303,355,904 bytes (4.01 GB)
 ///
-/// Each processor/core gets its own stack. The default stack size per processor is calculated as:
-/// STACK_PAGES_PER_PROCESSOR * BASE_PAGE_SIZE (4096 bytes per page)
-/// 0x4000 * 4096 = 67,108,864 bytes (64 MB)
-///
-/// This stack size is allocated individually for each processor.
-pub const STACK_PAGES_PER_PROCESSOR: usize = 0x4000;
+/// By adjusting the number of logical processors, the heap size will scale accordingly.
+pub const TOTAL_HEAP_SIZE: usize = (PAGE_TABLES_PER_PROCESSOR * PAGE_TABLE_SIZE * DEFAULT_LOGICAL_PROCESSORS) + HEAP_PADDING;