diff --git a/kernel/src/boot_stage2.rs b/kernel/src/boot_stage2.rs
index 12335b3b2..8136f4fd8 100644
--- a/kernel/src/boot_stage2.rs
+++ b/kernel/src/boot_stage2.rs
@@ -81,6 +81,15 @@ global_asm!(
         decl %ecx
         jnz 1b
 
+        /* Insert a self-map entry */
+        movl $pgtable, %edi
+        movl %edi, %eax
+        orl $0x63, %eax
+        /* The value 0xF68 is equivalent to 8 * PGTABLE_LVL3_IDX_PTE_SELFMAP */
+        movl %eax, 0xF68(%edi)
+        movl $0x80000000, %eax
+        movl %eax, 0xF6C(%edi)
+
         /* Signal APs */
         movl $setup_flag, %edi
         movl $1, (%edi)
diff --git a/kernel/src/mm/address_space.rs b/kernel/src/mm/address_space.rs
index 005df6e45..052b297db 100644
--- a/kernel/src/mm/address_space.rs
+++ b/kernel/src/mm/address_space.rs
@@ -5,6 +5,7 @@
 // Author: Joerg Roedel <jroedel@suse.de>
 
 use crate::address::{PhysAddr, VirtAddr};
+use crate::mm::pagetable::PageTable;
 use crate::utils::immut_after_init::ImmutAfterInitCell;
 
 #[derive(Debug, Copy, Clone)]
@@ -38,16 +39,6 @@ impl FixedAddressMappingRange {
             }
         }
     }
-
-    #[cfg(target_os = "none")]
-    fn virt_to_phys(&self, vaddr: VirtAddr) -> Option<PhysAddr> {
-        if (vaddr < self.virt_start) || (vaddr >= self.virt_end) {
-            None
-        } else {
-            let offset: usize = vaddr - self.virt_start;
-            Some(self.phys_start + offset)
-        }
-    }
 }
 
 #[derive(Debug, Copy, Clone)]
@@ -74,16 +65,12 @@ pub fn init_kernel_mapping_info(
 
 #[cfg(target_os = "none")]
 pub fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
-    if let Some(addr) = FIXED_MAPPING.kernel_mapping.virt_to_phys(vaddr) {
-        return addr;
-    }
-    if let Some(ref mapping) = FIXED_MAPPING.heap_mapping {
-        if let Some(addr) = mapping.virt_to_phys(vaddr) {
-            return addr;
+    match PageTable::virt_to_phys(vaddr) {
+        Some(paddr) => paddr,
+        None => {
+            panic!("Invalid virtual address {:#018x}", vaddr);
         }
     }
-
-    panic!("Invalid virtual address {:#018x}", vaddr);
 }
 
 #[cfg(target_os = "none")]
@@ -203,6 +190,11 @@ pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
 /// Kernel stack for a task
 pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;
 
+/// Page table self-map level 3 index
+pub const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;
+
+pub const SVSM_PTE_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PTE_SELFMAP);
+
 //
 // User-space mapping constants
 //
diff --git a/kernel/src/mm/pagetable.rs b/kernel/src/mm/pagetable.rs
index b9246dc9b..62d03c0a3 100644
--- a/kernel/src/mm/pagetable.rs
+++ b/kernel/src/mm/pagetable.rs
@@ -11,8 +11,10 @@ use crate::cpu::flush_tlb_global_sync;
 use crate::cpu::idt::common::PageFaultError;
 use crate::cpu::registers::RFlags;
 use crate::error::SvsmError;
-use crate::mm::PageBox;
-use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED};
+use crate::mm::{
+    phys_to_virt, virt_to_phys, PageBox, PGTABLE_LVL3_IDX_PTE_SELFMAP, PGTABLE_LVL3_IDX_SHARED,
+    SVSM_PTE_BASE,
+};
 use crate::platform::SvsmPlatform;
 use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
 use crate::utils::immut_after_init::{ImmutAfterInitCell, ImmutAfterInitResult};
@@ -360,6 +362,11 @@ impl PTEntry {
         let addr = PhysAddr::from(self.0.bits() & 0x000f_ffff_ffff_f000);
         strip_confidentiality_bits(addr)
     }
+
+    /// Read a page table entry from the specified virtual address.
+    pub fn read_pte(vaddr: VirtAddr) -> PTEntry {
+        unsafe { *vaddr.as_ptr::<PTEntry>() }
+    }
 }
 
 /// A pagetable page with multiple entries.
@@ -456,6 +463,18 @@ impl PageTable {
         virt_to_phys(pgtable)
     }
 
+    /// Initializes the self-map pointer within the page table.
+    pub fn set_self_map(&mut self, paddr: PhysAddr) {
+        let entry = &mut self.root[PGTABLE_LVL3_IDX_PTE_SELFMAP];
+        let flags = PTEntryFlags::PRESENT
+            | PTEntryFlags::WRITABLE
+            | PTEntryFlags::ACCESSED
+            | PTEntryFlags::DIRTY
+            | PTEntryFlags::NX;
+
+        entry.set(paddr, flags);
+    }
+
     /// Clone the shared part of the page table; excluding the private
     /// parts.
     ///
@@ -463,7 +482,9 @@
     /// Returns [`SvsmError`] if the page cannot be allocated.
     pub fn clone_shared(&self) -> Result<PageBox<PageTable>, SvsmError> {
         let mut pgtable = PageBox::try_new(PageTable::default())?;
+        let paddr = virt_to_phys(pgtable.vaddr());
         pgtable.root.entries[PGTABLE_LVL3_IDX_SHARED] = self.root.entries[PGTABLE_LVL3_IDX_SHARED];
+        pgtable.set_self_map(paddr);
         Ok(pgtable)
     }
 
@@ -560,6 +581,73 @@ impl PageTable {
         Self::walk_addr_lvl3(&mut self.root, vaddr)
     }
 
+    /// Calculate the virtual address of the PTE in the self-map that maps
+    /// a specified virtual address.
+    ///
+    /// # Parameters
+    /// - `vaddr`: The virtual address whose PTE should be located.
+    ///
+    /// # Returns
+    /// The virtual address of the PTE.
+    fn get_pte_address(vaddr: VirtAddr) -> VirtAddr {
+        SVSM_PTE_BASE
+            + ((u64::from(vaddr) & 0x0000_FFFF_FFFF_F000) >> 9)
+                .try_into()
+                .unwrap()
+    }
+
+    /// Perform a virtual-to-physical translation using the self-map.
+    ///
+    /// # Parameters
+    /// - `vaddr`: The virtual address to translate.
+    ///
+    /// # Returns
+    /// `Some(PhysAddr)` if the virtual address is valid.
+    /// `None` if the virtual address is not valid.
+    pub fn virt_to_phys(vaddr: VirtAddr) -> Option<PhysAddr> {
+        // Calculate the virtual addresses of each level of the paging
+        // hierarchy in the self-map.
+        let pte_addr = Self::get_pte_address(vaddr);
+        let pde_addr = Self::get_pte_address(pte_addr);
+        let pdpe_addr = Self::get_pte_address(pde_addr);
+        let pml4e_addr = Self::get_pte_address(pdpe_addr);
+
+        // Check each entry in the paging hierarchy to determine whether
+        // this address is mapped.
+        let pml4e = PTEntry::read_pte(pml4e_addr);
+        if !pml4e.present() {
+            return None;
+        }
+
+        // There is no need to check for a large page in the PML4E because
+        // the architecture does not support the large bit at the top-level
+        // entry. If a large page is detected at a lower level of the
+        // hierarchy, the low bits from the virtual address must be combined
+        // with the physical address from the PDE/PDPE.
+        let pdpe = PTEntry::read_pte(pdpe_addr);
+        if !pdpe.present() {
+            return None;
+        }
+        if pdpe.huge() {
+            return Some(pdpe.address() + (u64::from(vaddr) & 0x3FFF_FFFF).try_into().unwrap());
+        }
+
+        let pde = PTEntry::read_pte(pde_addr);
+        if !pde.present() {
+            return None;
+        }
+        if pde.huge() {
+            return Some(pde.address() + (u64::from(vaddr) & 0x001F_FFFF).try_into().unwrap());
+        }
+
+        let pte = PTEntry::read_pte(pte_addr);
+        if pte.present() {
+            Some(pte.address() + (u64::from(vaddr) & 0xFFF).try_into().unwrap())
+        } else {
+            None
+        }
+    }
+
     fn alloc_pte_lvl3(entry: &mut PTEntry, vaddr: VirtAddr, size: PageSize) -> Mapping<'_> {
         let flags = entry.flags();
diff --git a/kernel/src/svsm_paging.rs b/kernel/src/svsm_paging.rs
index 94551ee93..433e145ac 100644
--- a/kernel/src/svsm_paging.rs
+++ b/kernel/src/svsm_paging.rs
@@ -9,7 +9,7 @@ use crate::config::SvsmConfig;
 use crate::error::SvsmError;
 use crate::igvm_params::IgvmParams;
 use crate::mm::pagetable::{PTEntryFlags, PageTable};
-use crate::mm::PageBox;
+use crate::mm::{virt_to_phys, PageBox};
 use crate::platform::{PageStateChangeOp, PageValidateOp, SvsmPlatform};
 use crate::types::PageSize;
 use crate::utils::MemoryRegion;
@@ -25,6 +25,9 @@ pub fn init_page_table(
     kernel_elf: &elf::Elf64File<'_>,
 ) -> Result<PageBox<PageTable>, SvsmError> {
     let mut pgtable = PageBox::try_new(PageTable::default())?;
+    let paddr = virt_to_phys(pgtable.vaddr());
+    pgtable.set_self_map(paddr);
+
     let igvm_param_info = if launch_info.igvm_params_virt_addr != 0 {
         let addr = VirtAddr::from(launch_info.igvm_params_virt_addr);
         IgvmParamInfo {
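
Reviewer note: a quick standalone check that the magic numbers in the stage-2 assembly agree with the Rust side of the patch. This is a sketch using plain integer math, not code from the patch; the constants are copied from the diff above and the flag bit positions are the architectural ones.

```rust
// Standalone sanity check of the stage-2 self-map constants.

const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;

fn main() {
    // The assembly pokes the entry by byte offset: page-table entries
    // are 8 bytes wide, so index 493 sits at 8 * 493 == 0xF68 (low
    // dword) and 0xF6C (high dword).
    assert_eq!(8 * PGTABLE_LVL3_IDX_PTE_SELFMAP, 0xF68);

    // orl $0x63 sets PRESENT (bit 0), WRITABLE (bit 1), ACCESSED
    // (bit 5) and DIRTY (bit 6), the same flags set_self_map() uses.
    assert_eq!(0x1u64 | 0x2 | 0x20 | 0x40, 0x63);

    // Writing 0x80000000 to the high dword sets bit 63 of the full
    // 64-bit entry, i.e. NX, which set_self_map() also sets.
    assert_eq!(0x8000_0000u64 << 32, 1u64 << 63);

    println!("stage-2 self-map constants check out");
}
```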
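And a sketch of the address arithmetic behind `get_pte_address()` / `virt_to_phys()`, under the assumption that `virt_from_idx()` places the top-level index in virtual-address bits 47:39 and sign-extends from bit 47 (modeled below; `main()` and the `u64` helpers are hypothetical, not the kernel types):

```rust
// A model of the recursive self-map arithmetic using plain u64 values.

const PGTABLE_LVL3_IDX_PTE_SELFMAP: u64 = 493;

// Assumed model of virt_from_idx(): shift the top-level index into
// bits 47:39, then sign-extend from bit 47 to a canonical address.
const fn virt_from_idx(idx: u64) -> u64 {
    (((idx << 39) << 16) as i64 >> 16) as u64
}

const SVSM_PTE_BASE: u64 = virt_from_idx(PGTABLE_LVL3_IDX_PTE_SELFMAP);

// Mirror of PageTable::get_pte_address(): masking keeps VA bits 47:12
// (the page number), and >> 9 is (vaddr >> 12) * 8, i.e. the byte
// offset of an 8-byte entry, rebased into the self-map window.
fn get_pte_address(vaddr: u64) -> u64 {
    SVSM_PTE_BASE + ((vaddr & 0x0000_FFFF_FFFF_F000) >> 9)
}

fn main() {
    // 493 << 39, sign-extended, gives the self-map window base.
    assert_eq!(SVSM_PTE_BASE, 0xFFFF_F680_0000_0000);

    // Each application of the transform climbs one level of the
    // hierarchy (PTE -> PDE -> PDPE -> PML4E), which is why
    // virt_to_phys() derives all four entry addresses by iterating
    // the same function before walking the entries top-down.
    let vaddr = 0xFFFF_8000_1234_5000_u64;
    let pte_addr = get_pte_address(vaddr);
    let pde_addr = get_pte_address(pte_addr);
    let pdpe_addr = get_pte_address(pde_addr);
    let pml4e_addr = get_pte_address(pdpe_addr);
    println!("PTE   {pte_addr:#018x}");
    println!("PDE   {pde_addr:#018x}");
    println!("PDPE  {pdpe_addr:#018x}");
    println!("PML4E {pml4e_addr:#018x}");
}
```

The iteration works because the self-map entry makes every page-table page reachable as an ordinary data page one level down, so the same shift-and-rebase step applies at each level.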