diff --git a/kernel/src/boot_stage2.rs b/kernel/src/boot_stage2.rs
index cf2d187b7..4edbee8d0 100644
--- a/kernel/src/boot_stage2.rs
+++ b/kernel/src/boot_stage2.rs
@@ -81,6 +81,16 @@ global_asm!(
         decl %ecx
         jnz 1b

+        /* Insert a self-map entry */
+        movl $pgtable, %edi
+        movl %edi, %eax
+        orl $0x63, %eax
+        /* The value 0xF68 is equivalent to 8 * PGTABLE_LVL3_IDX_PTE_SELFMAP */
+        movl %eax, 0xF68(%edi)
+        movl $0x80000000, %eax
+        orl %edx, %eax
+        movl %eax, 0xF6C(%edi)
+
         /* Signal APs */
         movl $setup_flag, %edi
         movl $1, (%edi)
@@ -99,11 +109,13 @@
         bts $5, %eax
         movl %eax, %cr4

-        /* Enable long mode, EFER.LME. */
+        /* Enable long mode, EFER.LME. Also ensure NXE is set. */
         movl $0xc0000080, %ecx
         rdmsr
-        bts $8, %eax
-        jc 2f
+        movl %eax, %ebx
+        orl $0x900, %eax
+        cmp %eax, %ebx
+        jz 2f
         wrmsr
 2:
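A quick standalone check (not part of the patch) that the magic numbers in this assembly agree with one another; `PGTABLE_LVL3_IDX_PTE_SELFMAP` is the constant introduced in `address_space.rs` later in this diff:

```rust
const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;

fn main() {
    // Each PML4 entry is 8 bytes wide, so slot 493 lives at byte offset
    // 8 * 493 = 0xF68; 0xF6C addresses the high dword of the same entry.
    assert_eq!(8 * PGTABLE_LVL3_IDX_PTE_SELFMAP, 0xF68);

    // 0x63 = PRESENT | WRITABLE | ACCESSED | DIRTY (bits 0, 1, 5, 6).
    assert_eq!(0x63, 1 | (1 << 1) | (1 << 5) | (1 << 6));

    // The 0x8000_0000 written to the high dword is bit 63 of the 64-bit
    // entry, i.e. NX, which is why the EFER change below must also set NXE.
    assert_eq!(0x8000_0000u64 << 32, 1 << 63);

    // 0x900 = LME (bit 8) | NXE (bit 11).
    assert_eq!(0x900, (1 << 8) | (1 << 11));
}
```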
diff --git a/kernel/src/cpu/efer.rs b/kernel/src/cpu/efer.rs
index fa66c2d92..681db39a1 100644
--- a/kernel/src/cpu/efer.rs
+++ b/kernel/src/cpu/efer.rs
@@ -4,9 +4,7 @@
 //
 // Author: Joerg Roedel

-use super::features::cpu_has_nx;
 use super::msr::{read_msr, write_msr, EFER};
-use crate::platform::SvsmPlatform;
 use bitflags::bitflags;

 bitflags! {
@@ -34,15 +32,3 @@ pub fn write_efer(efer: EFERFlags) {
     let val = efer.bits();
     write_msr(EFER, val);
 }
-
-pub fn efer_init(platform: &dyn SvsmPlatform) {
-    let mut efer = read_efer();
-
-    // All processors that are capable of virtualization will support
-    // no-execute table entries, so there is no reason to support any processor
-    // that does not enumerate NX capability.
-    assert!(cpu_has_nx(platform), "CPU does not support NX");
-
-    efer.insert(EFERFlags::NXE);
-    write_efer(efer);
-}
diff --git a/kernel/src/cpu/features.rs b/kernel/src/cpu/features.rs
index 25b6fd220..a96b830ce 100644
--- a/kernel/src/cpu/features.rs
+++ b/kernel/src/cpu/features.rs
@@ -6,18 +6,8 @@

 use crate::platform::SvsmPlatform;

-const X86_FEATURE_NX: u32 = 20;
 const X86_FEATURE_PGE: u32 = 13;

-pub fn cpu_has_nx(platform: &dyn SvsmPlatform) -> bool {
-    let ret = platform.cpuid(0x80000001);
-
-    match ret {
-        None => false,
-        Some(c) => (c.edx >> X86_FEATURE_NX) & 1 == 1,
-    }
-}
-
 pub fn cpu_has_pge(platform: &dyn SvsmPlatform) -> bool {
     let ret = platform.cpuid(0x00000001);
diff --git a/kernel/src/igvm_params.rs b/kernel/src/igvm_params.rs
index 7c6e90c6e..d6461bbe3 100644
--- a/kernel/src/igvm_params.rs
+++ b/kernel/src/igvm_params.rs
@@ -12,7 +12,7 @@ use crate::cpu::efer::EFERFlags;
 use crate::error::SvsmError;
 use crate::fw_meta::SevFWMetaData;
 use crate::mm::{GuestPtr, PerCPUPageMappingGuard, PAGE_SIZE};
-use crate::platform::{PageStateChangeOp, SVSM_PLATFORM};
+use crate::platform::{PageStateChangeOp, PageValidateOp, SVSM_PLATFORM};
 use crate::types::PageSize;
 use crate::utils::MemoryRegion;
 use alloc::vec::Vec;
@@ -173,7 +173,7 @@ impl IgvmParams<'_> {
         }

         let mem_map_va_region = MemoryRegion::<VirtAddr>::new(mem_map_va, mem_map_region.len());
-        platform.validate_page_range(mem_map_va_region)?;
+        platform.validate_virtual_page_range(mem_map_va_region, PageValidateOp::Validate)?;

         // Calculate the maximum number of entries that can be inserted.
         let max_entries = fw_info.memory_map_page_count as usize * PAGE_SIZE
diff --git a/kernel/src/mm/address_space.rs b/kernel/src/mm/address_space.rs
index 005df6e45..a3005669e 100644
--- a/kernel/src/mm/address_space.rs
+++ b/kernel/src/mm/address_space.rs
@@ -7,6 +7,9 @@
 use crate::address::{PhysAddr, VirtAddr};
 use crate::utils::immut_after_init::ImmutAfterInitCell;

+#[cfg(target_os = "none")]
+use crate::mm::pagetable::PageTable;
+
 #[derive(Debug, Copy, Clone)]
 #[allow(dead_code)]
 pub struct FixedAddressMappingRange {
@@ -38,16 +41,6 @@ impl FixedAddressMappingRange {
             }
         }
     }
-
-    #[cfg(target_os = "none")]
-    fn virt_to_phys(&self, vaddr: VirtAddr) -> Option<PhysAddr> {
-        if (vaddr < self.virt_start) || (vaddr >= self.virt_end) {
-            None
-        } else {
-            let offset: usize = vaddr - self.virt_start;
-            Some(self.phys_start + offset)
-        }
-    }
 }

 #[derive(Debug, Copy, Clone)]
@@ -74,16 +67,12 @@ pub fn init_kernel_mapping_info(

 #[cfg(target_os = "none")]
 pub fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
-    if let Some(addr) = FIXED_MAPPING.kernel_mapping.virt_to_phys(vaddr) {
-        return addr;
-    }
-    if let Some(ref mapping) = FIXED_MAPPING.heap_mapping {
-        if let Some(addr) = mapping.virt_to_phys(vaddr) {
-            return addr;
+    match PageTable::virt_to_phys(vaddr) {
+        Some(paddr) => paddr,
+        None => {
+            panic!("Invalid virtual address {:#018x}", vaddr);
         }
     }
-
-    panic!("Invalid virtual address {:#018x}", vaddr);
 }

 #[cfg(target_os = "none")]
@@ -203,6 +192,11 @@ pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
 /// Kernel stack for a task
 pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;

+/// Page table self-map level 3 index
+pub const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;
+
+pub const SVSM_PTE_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PTE_SELFMAP);
+
 //
 // User-space mapping constants
 //
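The value of `SVSM_PTE_BASE` follows from standard x86-64 canonical-address arithmetic. A standalone sketch, with `virt_from_idx` rewritten here as a plain-`u64` stand-in for the kernel's helper of the same name:

```rust
// Sign-extend a PML4 index into a canonical virtual address. A PML4 slot
// covers 512 GiB (1 << 39); slots >= 256 sit in the upper half of the
// address space and must be sign-extended.
fn virt_from_idx(idx: u64) -> u64 {
    let va = idx << 39;
    if idx >= 256 {
        va | 0xFFFF_0000_0000_0000
    } else {
        va
    }
}

fn main() {
    // Slot 493 (0x1ED) yields 0xFFFF_F680_0000_0000: note the same 0xF68
    // that appears as the byte offset in the stage-2 assembly above.
    assert_eq!(virt_from_idx(493), 0xFFFF_F680_0000_0000);
}
```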
diff --git a/kernel/src/mm/page_visibility.rs b/kernel/src/mm/page_visibility.rs
index b5c015a65..0d0e30d3d 100644
--- a/kernel/src/mm/page_visibility.rs
+++ b/kernel/src/mm/page_visibility.rs
@@ -16,7 +16,7 @@ use crate::mm::validate::{
     valid_bitmap_clear_valid_4k, valid_bitmap_set_valid_4k, valid_bitmap_valid_addr,
 };
 use crate::mm::{virt_to_phys, PageBox};
-use crate::platform::{PageStateChangeOp, SVSM_PLATFORM};
+use crate::platform::{PageStateChangeOp, PageValidateOp, SVSM_PLATFORM};
 use crate::protocols::errors::SvsmReqError;
 use crate::types::{PageSize, PAGE_SIZE};
 use crate::utils::MemoryRegion;
@@ -39,7 +39,10 @@ unsafe fn make_page_shared(vaddr: VirtAddr) -> Result<(), SvsmError> {
     let platform = SVSM_PLATFORM.as_dyn_ref();

     // Revoke page validation before changing page state.
-    platform.invalidate_page_range(MemoryRegion::new(vaddr, PAGE_SIZE))?;
+    platform.validate_virtual_page_range(
+        MemoryRegion::new(vaddr, PAGE_SIZE),
+        PageValidateOp::Invalidate,
+    )?;
     let paddr = virt_to_phys(vaddr);
     if valid_bitmap_valid_addr(paddr) {
         valid_bitmap_clear_valid_4k(paddr);
@@ -83,8 +86,11 @@ unsafe fn make_page_private(vaddr: VirtAddr) -> Result<(), SvsmError> {
         PageStateChangeOp::Private,
     )?;

-    // Revoke page validation before changing page state.
-    platform.validate_page_range(MemoryRegion::new(vaddr, PAGE_SIZE))?;
+    // Validate the page now that it is private again.
+    platform.validate_virtual_page_range(
+        MemoryRegion::new(vaddr, PAGE_SIZE),
+        PageValidateOp::Validate,
+    )?;
     if valid_bitmap_valid_addr(paddr) {
         valid_bitmap_set_valid_4k(paddr);
     }
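The two hunks above preserve a simple invariant: a page is validated only while it is private, so validation is revoked before flipping a page to shared and re-established only after the page is private again. An illustrative model with mock states (not the SVSM API):

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Page {
    PrivateValid,
    Shared,
}

fn make_shared(p: Page) -> Page {
    assert_eq!(p, Page::PrivateValid);
    // 1. validate_virtual_page_range(region, PageValidateOp::Invalidate)
    // 2. page_state_change(region, ..., PageStateChangeOp::Shared)
    Page::Shared
}

fn make_private(p: Page) -> Page {
    assert_eq!(p, Page::Shared);
    // 1. page_state_change(region, ..., PageStateChangeOp::Private)
    // 2. validate_virtual_page_range(region, PageValidateOp::Validate)
    Page::PrivateValid
}

fn main() {
    let p = make_shared(Page::PrivateValid);
    assert_eq!(make_private(p), Page::PrivateValid);
}
```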
diff --git a/kernel/src/mm/pagetable.rs b/kernel/src/mm/pagetable.rs
index 3f6e40252..c70a97911 100644
--- a/kernel/src/mm/pagetable.rs
+++ b/kernel/src/mm/pagetable.rs
@@ -11,8 +11,10 @@ use crate::cpu::flush_tlb_global_sync;
 use crate::cpu::idt::common::PageFaultError;
 use crate::cpu::registers::RFlags;
 use crate::error::SvsmError;
-use crate::mm::PageBox;
-use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED};
+use crate::mm::{
+    phys_to_virt, virt_to_phys, PageBox, PGTABLE_LVL3_IDX_PTE_SELFMAP, PGTABLE_LVL3_IDX_SHARED,
+    SVSM_PTE_BASE,
+};
 use crate::platform::SvsmPlatform;
 use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
 use crate::utils::immut_after_init::{ImmutAfterInitCell, ImmutAfterInitResult};
@@ -53,7 +55,6 @@ pub fn paging_init_early(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> {
     init_encrypt_mask(platform)?;

     let mut feature_mask = PTEntryFlags::all();
-    feature_mask.remove(PTEntryFlags::NX);
     feature_mask.remove(PTEntryFlags::GLOBAL);
     FEATURE_MASK.reinit(&feature_mask)
 }
@@ -361,6 +362,17 @@ impl PTEntry {
         let addr = PhysAddr::from(self.0.bits() & 0x000f_ffff_ffff_f000);
         strip_confidentiality_bits(addr)
     }
+
+    /// Read a page table entry from the specified virtual address.
+    ///
+    /// # Safety
+    ///
+    /// Reads from an arbitrary virtual address, making this essentially a
+    /// raw pointer read. The caller must be certain to calculate the correct
+    /// address.
+    pub unsafe fn read_pte(vaddr: VirtAddr) -> Self {
+        *vaddr.as_ptr::<Self>()
+    }
 }

 /// A pagetable page with multiple entries.
@@ -457,13 +469,33 @@ impl PageTable {
         virt_to_phys(pgtable)
     }

+    /// Allocate a new page table root.
+    ///
+    /// # Errors
+    /// Returns [`SvsmError`] if the page cannot be allocated.
+    pub fn allocate_new() -> Result<PageBox<PageTable>, SvsmError> {
+        let mut pgtable = PageBox::try_new(PageTable::default())?;
+        let paddr = virt_to_phys(pgtable.vaddr());
+
+        // Set the self-map entry.
+        let entry = &mut pgtable.root[PGTABLE_LVL3_IDX_PTE_SELFMAP];
+        let flags = PTEntryFlags::PRESENT
+            | PTEntryFlags::WRITABLE
+            | PTEntryFlags::ACCESSED
+            | PTEntryFlags::DIRTY
+            | PTEntryFlags::NX;
+        entry.set(make_private_address(paddr), flags);
+
+        Ok(pgtable)
+    }
+
     /// Clone the shared part of the page table; excluding the private
     /// parts.
     ///
     /// # Errors
     /// Returns [`SvsmError`] if the page cannot be allocated.
     pub fn clone_shared(&self) -> Result<PageBox<PageTable>, SvsmError> {
-        let mut pgtable = PageBox::try_new(PageTable::default())?;
+        let mut pgtable = Self::allocate_new()?;
         pgtable.root.entries[PGTABLE_LVL3_IDX_SHARED] = self.root.entries[PGTABLE_LVL3_IDX_SHARED];
         Ok(pgtable)
     }
@@ -561,6 +593,72 @@ impl PageTable {
         Self::walk_addr_lvl3(&mut self.root, vaddr)
     }

+    /// Calculate the virtual address of a PTE in the self-map, which maps a
+    /// specified virtual address.
+    ///
+    /// # Parameters
+    /// - `vaddr`: The virtual address whose PTE should be located.
+    ///
+    /// # Returns
+    /// The virtual address of the PTE.
+    fn get_pte_address(vaddr: VirtAddr) -> VirtAddr {
+        SVSM_PTE_BASE + ((usize::from(vaddr) & 0x0000_FFFF_FFFF_F000) >> 9)
+    }
+
+    /// Perform a virtual to physical translation using the self-map.
+    ///
+    /// # Parameters
+    /// - `vaddr`: The virtual address to translate.
+    ///
+    /// # Returns
+    /// `Some(PhysAddr)` if the virtual address is valid.
+    /// `None` if the virtual address is not valid.
+    pub fn virt_to_phys(vaddr: VirtAddr) -> Option<PhysAddr> {
+        // Calculate the virtual addresses of each level of the paging
+        // hierarchy in the self-map.
+        let pte_addr = Self::get_pte_address(vaddr);
+        let pde_addr = Self::get_pte_address(pte_addr);
+        let pdpe_addr = Self::get_pte_address(pde_addr);
+        let pml4e_addr = Self::get_pte_address(pdpe_addr);
+
+        // Check each entry in the paging hierarchy to determine whether this
+        // address is mapped. Because the hierarchy is read from the top
+        // down using self-map addresses that were calculated correctly,
+        // the reads are safe to perform.
+        let pml4e = unsafe { PTEntry::read_pte(pml4e_addr) };
+        if !pml4e.present() {
+            return None;
+        }
+
+        // There is no need to check for a large page in the PML4E because
+        // the architecture does not support the large bit at the top-level
+        // entry. If a large page is detected at a lower level of the
+        // hierarchy, the low bits from the virtual address must be combined
+        // with the physical address from the PDE/PDPE.
+        let pdpe = unsafe { PTEntry::read_pte(pdpe_addr) };
+        if !pdpe.present() {
+            return None;
+        }
+        if pdpe.huge() {
+            return Some(pdpe.address() + (usize::from(vaddr) & 0x3FFF_FFFF));
+        }
+
+        let pde = unsafe { PTEntry::read_pte(pde_addr) };
+        if !pde.present() {
+            return None;
+        }
+        if pde.huge() {
+            return Some(pde.address() + (usize::from(vaddr) & 0x001F_FFFF));
+        }
+
+        let pte = unsafe { PTEntry::read_pte(pte_addr) };
+        if pte.present() {
+            Some(pte.address() + (usize::from(vaddr) & 0xFFF))
+        } else {
+            None
+        }
+    }
+
     fn alloc_pte_lvl3(entry: &mut PTEntry, vaddr: VirtAddr, size: PageSize) -> Mapping<'_> {
         let flags = entry.flags();
@@ -576,7 +674,7 @@ impl PageTable {
             | PTEntryFlags::WRITABLE
             | PTEntryFlags::USER
             | PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);

         let idx = Self::index::<2>(vaddr);
         Self::alloc_pte_lvl2(&mut page[idx], vaddr, size)
@@ -597,7 +695,7 @@ impl PageTable {
             | PTEntryFlags::WRITABLE
             | PTEntryFlags::USER
             | PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);

         let idx = Self::index::<1>(vaddr);
         Self::alloc_pte_lvl1(&mut page[idx], vaddr, size)
@@ -618,7 +716,7 @@ impl PageTable {
             | PTEntryFlags::WRITABLE
             | PTEntryFlags::USER
             | PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);

         let idx = Self::index::<0>(vaddr);
         Mapping::Level0(&mut page[idx])
@@ -1031,9 +1129,7 @@ impl PageTable {
             | PTEntryFlags::USER
             | PTEntryFlags::ACCESSED;
             let entry = &mut self.root[idx];
-            // The C bit is not required here because all page table fetches are
-            // made as C=1.
-            entry.set(paddr, flags);
+            entry.set(make_private_address(paddr), flags);
         }
     }
 }
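The self-map arithmetic in `get_pte_address` and `virt_to_phys` can be modeled outside the kernel. A standalone sketch in plain `u64` arithmetic (mask and constants copied from the hunk above):

```rust
const SELFMAP_IDX: u64 = 493;
const SVSM_PTE_BASE: u64 = 0xFFFF_0000_0000_0000 | (SELFMAP_IDX << 39);

// Keep bits 12..48 (the page-number bits), shift them down one paging level
// (9 bits), and rebase into the self-map window. The result is the virtual
// address at which the PTE mapping `vaddr` is itself mapped.
fn get_pte_address(vaddr: u64) -> u64 {
    SVSM_PTE_BASE + ((vaddr & 0x0000_FFFF_FFFF_F000) >> 9)
}

fn main() {
    let vaddr: u64 = 0xFFFF_8000_1234_5000;
    let pte = get_pte_address(vaddr);
    let pde = get_pte_address(pte);
    let pdpe = get_pte_address(pde);
    let pml4e = get_pte_address(pdpe);

    // Every step stays inside the 512 GiB window owned by PML4 slot 493.
    for addr in [pte, pde, pdpe, pml4e] {
        assert_eq!((addr >> 39) & 0x1FF, SELFMAP_IDX);
    }
    // After four steps the address has been funneled to the PML4 page
    // itself: the lower table indices now also equal the self-map index.
    assert_eq!((pml4e >> 30) & 0x1FF, SELFMAP_IDX);
    assert_eq!((pml4e >> 21) & 0x1FF, SELFMAP_IDX);
    println!("pte={pte:#x} pde={pde:#x} pdpe={pdpe:#x} pml4e={pml4e:#x}");
}
```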
diff --git a/kernel/src/platform/mod.rs b/kernel/src/platform/mod.rs
index 5e3f5cd93..e9171d451 100644
--- a/kernel/src/platform/mod.rs
+++ b/kernel/src/platform/mod.rs
@@ -41,6 +41,12 @@ pub enum PageStateChangeOp {
     Unsmash,
 }

+#[derive(Debug, Clone, Copy)]
+pub enum PageValidateOp {
+    Validate,
+    Invalidate,
+}
+
 /// This defines a platform abstraction to permit the SVSM to run on different
 /// underlying architectures.
 pub trait SvsmPlatform {
@@ -82,11 +88,22 @@ pub trait SvsmPlatform {
         op: PageStateChangeOp,
     ) -> Result<(), SvsmError>;

-    /// Marks a range of pages as valid for use as private pages.
-    fn validate_page_range(&self, region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError>;
+    /// Marks a physical range of pages as valid or invalid for use as
+    /// private pages. Not usable in stage2.
+    fn validate_physical_page_range(
+        &self,
+        region: MemoryRegion<PhysAddr>,
+        op: PageValidateOp,
+    ) -> Result<(), SvsmError>;

-    /// Marks a range of pages as invalid for use as private pages.
-    fn invalidate_page_range(&self, region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError>;
+    /// Marks a virtual range of pages as valid or invalid for use as private
+    /// pages. Provided primarily for use in stage2, where validation by
+    /// physical address cannot be supported.
+    fn validate_virtual_page_range(
+        &self,
+        region: MemoryRegion<VirtAddr>,
+        op: PageValidateOp,
+    ) -> Result<(), SvsmError>;

     /// Configures the use of alternate injection as requested.
     fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError>;
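The shape of the new API is easy to see in isolation: one entry point per address-space flavor, with the direction passed in as `PageValidateOp` instead of being baked into the method name. A minimal sketch with local stand-in types (not the SVSM trait itself):

```rust
#[derive(Debug, Clone, Copy)]
enum PageValidateOp {
    Validate,
    Invalidate,
}

// (start, length) stand-in for MemoryRegion<PhysAddr> / MemoryRegion<VirtAddr>.
struct Region(u64, usize);

trait Platform {
    // Physical ranges: the platform maps them itself; not usable in stage2.
    fn validate_physical_page_range(&self, r: Region, op: PageValidateOp);
    // Virtual ranges: already mapped; the form stage2 can use.
    fn validate_virtual_page_range(&self, r: Region, op: PageValidateOp);
}

struct NopPlatform;

impl Platform for NopPlatform {
    fn validate_physical_page_range(&self, r: Region, op: PageValidateOp) {
        println!("phys {:#x}+{:#x}: {:?}", r.0, r.1, op);
    }
    fn validate_virtual_page_range(&self, r: Region, op: PageValidateOp) {
        println!("virt {:#x}+{:#x}: {:?}", r.0, r.1, op);
    }
}

fn main() {
    let p = NopPlatform;
    p.validate_virtual_page_range(Region(0, 640 * 1024), PageValidateOp::Validate);
    p.validate_physical_page_range(Region(0x80_0000, 0x20_0000), PageValidateOp::Invalidate);
}
```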
diff --git a/kernel/src/platform/native.rs b/kernel/src/platform/native.rs
index 9f7a9a3d3..efa2d00b6 100644
--- a/kernel/src/platform/native.rs
+++ b/kernel/src/platform/native.rs
@@ -11,13 +11,16 @@ use crate::cpu::msr::write_msr;
 use crate::cpu::percpu::PerCpu;
 use crate::error::SvsmError;
 use crate::io::IOPort;
-use crate::platform::{PageEncryptionMasks, PageStateChangeOp, SvsmPlatform};
+use crate::platform::{PageEncryptionMasks, PageStateChangeOp, PageValidateOp, SvsmPlatform};
 use crate::serial::SerialPort;
 use crate::svsm_console::NativeIOPort;
 use crate::types::PageSize;
 use crate::utils::immut_after_init::ImmutAfterInitCell;
 use crate::utils::MemoryRegion;

+#[cfg(debug_assertions)]
+use crate::mm::virt_to_phys;
+
 static CONSOLE_IO: NativeIOPort = NativeIOPort::new();
 static CONSOLE_SERIAL: ImmutAfterInitCell<SerialPort<'static>> = ImmutAfterInitCell::uninit();
@@ -95,13 +98,31 @@ impl SvsmPlatform for NativePlatform {
         Ok(())
     }

-    /// Marks a range of pages as valid for use as private pages.
-    fn validate_page_range(&self, _region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError> {
+    fn validate_physical_page_range(
+        &self,
+        _region: MemoryRegion<PhysAddr>,
+        _op: PageValidateOp,
+    ) -> Result<(), SvsmError> {
         Ok(())
     }

-    /// Marks a range of pages as invalid for use as private pages.
-    fn invalidate_page_range(&self, _region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError> {
+    fn validate_virtual_page_range(
+        &self,
+        _region: MemoryRegion<VirtAddr>,
+        _op: PageValidateOp,
+    ) -> Result<(), SvsmError> {
+        #[cfg(debug_assertions)]
+        {
+            // Ensure that it is possible to translate this virtual address to
+            // a physical address. This is not necessary for correctness
+            // here, but since other platforms may rely on virtual-to-physical
+            // translation, it is helpful to force a translation here for
+            // debugging purposes just to help catch potential errors when
+            // testing on native.
+            for va in _region.iter_pages(PageSize::Regular) {
+                let _ = virt_to_phys(va);
+            }
+        }
         Ok(())
     }
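The `#[cfg(debug_assertions)]` block above reduces to a simple pattern: force a translation for every 4 KiB page in the region so an unmapped address fails loudly in debug builds. A standalone rendition, where `translate` is a hypothetical stand-in for the kernel's `virt_to_phys` (which panics on an unmapped address rather than returning an error):

```rust
const PAGE_SIZE: u64 = 4096;

// Identity mapping stands in for a real page-table walk here.
fn translate(va: u64) -> u64 {
    va
}

fn debug_check_translations(start: u64, len: u64) {
    // cfg!() keeps the code type-checked in all builds; the patch instead
    // uses #[cfg(debug_assertions)] to compile the block out entirely.
    if cfg!(debug_assertions) {
        for va in (start..start + len).step_by(PAGE_SIZE as usize) {
            let _ = translate(va);
        }
    }
}

fn main() {
    // 640 KiB => 160 forced translations.
    debug_check_translations(0, 640 * 1024);
}
```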
diff --git a/kernel/src/platform/snp.rs b/kernel/src/platform/snp.rs
index d732f3c49..6ba2c0e69 100644
--- a/kernel/src/platform/snp.rs
+++ b/kernel/src/platform/snp.rs
@@ -4,7 +4,7 @@
 //
 // Author: Jon Lange

-use crate::address::{PhysAddr, VirtAddr};
+use crate::address::{Address, PhysAddr, VirtAddr};
 use crate::console::init_console;
 use crate::cpu::cpuid::{cpuid_table, CpuidResult};
 use crate::cpu::percpu::{current_ghcb, this_cpu, PerCpu};
@@ -12,7 +12,8 @@ use crate::error::ApicError::Registration;
 use crate::error::SvsmError;
 use crate::greq::driver::guest_request_driver_init;
 use crate::io::IOPort;
-use crate::platform::{PageEncryptionMasks, PageStateChangeOp, SvsmPlatform};
+use crate::mm::{PerCPUPageMappingGuard, PAGE_SIZE, PAGE_SIZE_2M};
+use crate::platform::{PageEncryptionMasks, PageStateChangeOp, PageValidateOp, SvsmPlatform};
 use crate::serial::SerialPort;
 use crate::sev::hv_doorbell::current_hv_doorbell;
 use crate::sev::msr_protocol::{hypervisor_ghcb_features, verify_ghcb_version, GHCBHvFeatures};
@@ -25,6 +26,9 @@ use crate::types::PageSize;
 use crate::utils::immut_after_init::ImmutAfterInitCell;
 use crate::utils::MemoryRegion;

+#[cfg(debug_assertions)]
+use crate::mm::virt_to_phys;
+
 use core::sync::atomic::{AtomicU32, Ordering};

 static CONSOLE_IO: SVSMIOPort = SVSMIOPort::new();
@@ -34,6 +38,36 @@ static VTOM: ImmutAfterInitCell<usize> = ImmutAfterInitCell::uninit();

 static APIC_EMULATION_REG_COUNT: AtomicU32 = AtomicU32::new(0);

+fn pvalidate_page_range(range: MemoryRegion<PhysAddr>, op: PvalidateOp) -> Result<(), SvsmError> {
+    // In the future, it is likely that this function will need to be prepared
+    // to execute both PVALIDATE and RMPADJUST over the same set of addresses,
+    // so the loop is structured to anticipate that possibility.
+    let mut paddr = range.start();
+    let paddr_end = range.end();
+    while paddr < paddr_end {
+        // Check whether a 2 MB page can be attempted.
+        let len = if paddr.is_aligned(PAGE_SIZE_2M) && paddr + PAGE_SIZE_2M <= paddr_end {
+            PAGE_SIZE_2M
+        } else {
+            PAGE_SIZE
+        };
+        let mapping = PerCPUPageMappingGuard::create(paddr, paddr + len, 0)?;
+        pvalidate_range(MemoryRegion::new(mapping.virt_addr(), len), op)?;
+        paddr = paddr + len;
+    }
+
+    Ok(())
+}
+
+impl From<PageValidateOp> for PvalidateOp {
+    fn from(op: PageValidateOp) -> PvalidateOp {
+        match op {
+            PageValidateOp::Validate => PvalidateOp::Valid,
+            PageValidateOp::Invalidate => PvalidateOp::Invalid,
+        }
+    }
+}
+
 #[derive(Clone, Copy, Debug)]
 pub struct SnpPlatform {}
@@ -141,14 +175,32 @@ impl SvsmPlatform for SnpPlatform {
         current_ghcb().page_state_change(region, size, op)
     }

-    /// Marks a range of pages as valid for use as private pages.
-    fn validate_page_range(&self, region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError> {
-        pvalidate_range(region, PvalidateOp::Valid)
+    fn validate_physical_page_range(
+        &self,
+        region: MemoryRegion<PhysAddr>,
+        op: PageValidateOp,
+    ) -> Result<(), SvsmError> {
+        pvalidate_page_range(region, PvalidateOp::from(op))
     }

-    /// Marks a range of pages as invalid for use as private pages.
-    fn invalidate_page_range(&self, region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError> {
-        pvalidate_range(region, PvalidateOp::Invalid)
+    fn validate_virtual_page_range(
+        &self,
+        region: MemoryRegion<VirtAddr>,
+        op: PageValidateOp,
+    ) -> Result<(), SvsmError> {
+        #[cfg(debug_assertions)]
+        {
+            // Ensure that it is possible to translate this virtual address to
+            // a physical address. This is not necessary for correctness
+            // here, but since other platforms may rely on virtual-to-physical
+            // translation, it is helpful to force a translation here for
+            // debugging purposes just to help catch potential errors when
+            // testing on SNP.
+            for va in region.iter_pages(PageSize::Regular) {
+                let _ = virt_to_phys(va);
+            }
+        }
+        pvalidate_range(region, PvalidateOp::from(op))
     }

     fn configure_alternate_injection(&mut self, alt_inj_requested: bool) -> Result<(), SvsmError> {
diff --git a/kernel/src/platform/tdp.rs b/kernel/src/platform/tdp.rs
index 35214eddb..9a665f911 100644
--- a/kernel/src/platform/tdp.rs
+++ b/kernel/src/platform/tdp.rs
@@ -10,7 +10,7 @@ use crate::cpu::cpuid::CpuidResult;
 use crate::cpu::percpu::PerCpu;
 use crate::error::SvsmError;
 use crate::io::IOPort;
-use crate::platform::{PageEncryptionMasks, PageStateChangeOp, SvsmPlatform};
+use crate::platform::{PageEncryptionMasks, PageStateChangeOp, PageValidateOp, SvsmPlatform};
 use crate::serial::SerialPort;
 use crate::svsm_console::SVSMIOPort;
 use crate::types::PageSize;
@@ -95,11 +95,19 @@ impl SvsmPlatform for TdpPlatform {
         Err(SvsmError::Tdx)
     }

-    fn validate_page_range(&self, _region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError> {
+    fn validate_physical_page_range(
+        &self,
+        _region: MemoryRegion<PhysAddr>,
+        _op: PageValidateOp,
+    ) -> Result<(), SvsmError> {
         Err(SvsmError::Tdx)
     }

-    fn invalidate_page_range(&self, _region: MemoryRegion<VirtAddr>) -> Result<(), SvsmError> {
+    fn validate_virtual_page_range(
+        &self,
+        _region: MemoryRegion<VirtAddr>,
+        _op: PageValidateOp,
+    ) -> Result<(), SvsmError> {
         Err(SvsmError::Tdx)
     }
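The chunking policy in the SNP `pvalidate_page_range` above can be tested in isolation: prefer a 2 MiB operation whenever the cursor is 2 MiB-aligned and at least 2 MiB remain, otherwise fall back to 4 KiB. A standalone sketch:

```rust
const PAGE_SIZE: u64 = 0x1000;
const PAGE_SIZE_2M: u64 = 0x20_0000;

// Return the sequence of chunk sizes the loop would process.
fn chunk_sizes(start: u64, end: u64) -> Vec<u64> {
    let mut sizes = Vec::new();
    let mut paddr = start;
    while paddr < end {
        let len = if paddr % PAGE_SIZE_2M == 0 && paddr + PAGE_SIZE_2M <= end {
            PAGE_SIZE_2M
        } else {
            PAGE_SIZE
        };
        sizes.push(len);
        paddr += len;
    }
    sizes
}

fn main() {
    // A 4 MiB + 8 KiB region starting 4 KiB below a 2 MiB boundary:
    // one 4 KiB page to reach alignment, two 2 MiB pages, then one
    // trailing 4 KiB page.
    let sizes = chunk_sizes(0x1F_F000, 0x1F_F000 + 0x40_2000);
    assert_eq!(sizes, vec![PAGE_SIZE, PAGE_SIZE_2M, PAGE_SIZE_2M, PAGE_SIZE]);
}
```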
+ platform + .validate_virtual_page_range(lowmem_region, PageValidateOp::Validate) + .expect("failed to validate low 640 KB"); + let cpuid_page = unsafe { &*(launch_info.cpuid_page as *const SnpCpuidTable) }; register_cpuid_table(cpuid_page); @@ -152,7 +155,7 @@ fn map_and_validate( PageStateChangeOp::Private, )?; } - platform.validate_page_range(vregion)?; + platform.validate_virtual_page_range(vregion, PageValidateOp::Validate)?; valid_bitmap_set_valid_range(paddr, paddr + vregion.len()); Ok(()) } diff --git a/kernel/src/svsm.rs b/kernel/src/svsm.rs index f5cb7ef21..ee4418c42 100755 --- a/kernel/src/svsm.rs +++ b/kernel/src/svsm.rs @@ -20,7 +20,6 @@ use svsm::config::SvsmConfig; use svsm::console::install_console_logger; use svsm::cpu::control_regs::{cr0_init, cr4_init}; use svsm::cpu::cpuid::{dump_cpuid_table, register_cpuid_table}; -use svsm::cpu::efer::efer_init; use svsm::cpu::gdt; use svsm::cpu::idt::svsm::{early_idt_init, idt_init}; use svsm::cpu::percpu::current_ghcb; @@ -311,7 +310,6 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: usize) { cr0_init(); cr4_init(platform); - efer_init(platform); install_console_logger("SVSM").expect("Console logger already initialized"); platform .env_setup(debug_serial_port, launch_info.vtom.try_into().unwrap()) diff --git a/kernel/src/svsm_paging.rs b/kernel/src/svsm_paging.rs index 3ea9b7ec7..8c7772ed6 100644 --- a/kernel/src/svsm_paging.rs +++ b/kernel/src/svsm_paging.rs @@ -9,10 +9,9 @@ use crate::config::SvsmConfig; use crate::error::SvsmError; use crate::igvm_params::IgvmParams; use crate::mm::pagetable::{PTEntryFlags, PageTable}; -use crate::mm::{PageBox, PerCPUPageMappingGuard}; -use crate::platform::PageStateChangeOp; -use crate::platform::SvsmPlatform; -use crate::types::{PageSize, PAGE_SIZE}; +use crate::mm::PageBox; +use crate::platform::{PageStateChangeOp, PageValidateOp, SvsmPlatform}; +use crate::types::PageSize; use crate::utils::MemoryRegion; use bootlib::kernel_launch::KernelLaunchInfo; @@ -25,7 +24,8 @@ pub fn init_page_table( launch_info: &KernelLaunchInfo, kernel_elf: &elf::Elf64File<'_>, ) -> Result, SvsmError> { - let mut pgtable = PageBox::try_new(PageTable::default())?; + let mut pgtable = PageTable::allocate_new()?; + let igvm_param_info = if launch_info.igvm_params_virt_addr != 0 { let addr = VirtAddr::from(launch_info.igvm_params_virt_addr); IgvmParamInfo { @@ -105,15 +105,12 @@ fn invalidate_boot_memory_region( region.end() ); - for paddr in region.iter_pages(PageSize::Regular) { - let guard = PerCPUPageMappingGuard::create_4k(paddr)?; - let vaddr = guard.virt_addr(); - - platform.invalidate_page_range(MemoryRegion::new(vaddr, PAGE_SIZE))?; - } + if !region.is_empty() { + platform.validate_physical_page_range(region, PageValidateOp::Invalidate)?; - if config.page_state_change_required() && !region.is_empty() { - platform.page_state_change(region, PageSize::Regular, PageStateChangeOp::Shared)?; + if config.page_state_change_required() { + platform.page_state_change(region, PageSize::Regular, PageStateChangeOp::Shared)?; + } } Ok(())