Skip to content

Commit

Permalink
SSE/FPU: implement xsave area using PageBox
Browse files Browse the repository at this point in the history
This is a temporary solution until we have a smart pointer.
This also gives us a chance to move the xsave/xrstor functionality
out of the assembly block.

Signed-off-by: Vasant Karasulli <[email protected]>
  • Loading branch information
vsntk18 committed Oct 7, 2024
1 parent ab03816 commit 35a3107
Show file tree
Hide file tree
Showing 6 changed files with 78 additions and 91 deletions.
33 changes: 33 additions & 0 deletions kernel/src/cpu/sse.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

use crate::cpu::control_regs::{cr0_sse_enable, cr4_osfxsr_enable, cr4_xsave_enable};
use crate::cpu::cpuid::CpuidResult;
use core::arch::asm;
use core::arch::x86_64::{_xgetbv, _xsetbv};

const CPUID_EDX_SSE1: u32 = 25;
Expand Down Expand Up @@ -69,3 +70,35 @@ pub fn sse_init() {
legacy_sse_enable();
extended_sse_enable();
}

/// Saves the current task's x87/SSE/AVX register state into the XSAVE
/// area at `addr` using the `xsaveopt` instruction.
///
/// # Safety
/// inline assembly here is used to save the SSE/FPU
/// context. This context store is specific to a task and
/// no other part of the code is accessing this memory at the same time.
/// NOTE(review): `xsaveopt` additionally requires `addr` to point to a
/// 64-byte-aligned buffer large enough for the enabled state
/// components — confirm the caller's allocation guarantees this.
pub unsafe fn sse_save_context(addr: u64) {
    // State-component bitmap: x87 FPU, SSE (XMM) and AVX (YMM) state.
    let save_bits = XCR0_X87_ENABLE | XCR0_SSE_ENABLE | XCR0_YMM_ENABLE;
    asm!(
        r#"
        xsaveopt (%rsi)
        "#,
        in("rsi") addr,
        // xsaveopt reads its requested-feature bitmap from edx:eax; the
        // mask bits used here fit in the low 32 bits, so rdx is zeroed.
        in("rax") save_bits,
        in("rdx") 0,
        options(att_syntax));
}

/// Restores the task's x87/SSE/AVX register state from the XSAVE area
/// at `addr` using the `xrstor` instruction.
///
/// # Safety
/// inline assembly here is used to restore the SSE/FPU
/// context. This context store is specific to a task and
/// no other part of the code is accessing this memory at the same time.
/// NOTE(review): `addr` must point to a 64-byte-aligned area that was
/// previously written by xsave/xsaveopt (or validly initialized),
/// otherwise `xrstor` can fault — confirm callers guarantee this.
pub unsafe fn sse_restore_context(addr: u64) {
    // State-component bitmap: x87 FPU, SSE (XMM) and AVX (YMM) state.
    // Must cover the components that were requested at save time.
    let save_bits = XCR0_X87_ENABLE | XCR0_SSE_ENABLE | XCR0_YMM_ENABLE;
    asm!(
        r#"
        xrstor (%rsi)
        "#,
        in("rsi") addr,
        // xrstor reads its requested-feature bitmap from edx:eax; the
        // mask bits used here fit in the low 32 bits, so rdx is zeroed.
        in("rax") save_bits,
        in("rdx") 0,
        options(att_syntax));
}
2 changes: 0 additions & 2 deletions kernel/src/mm/vm/mapping/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ pub mod phys_mem;
pub mod rawalloc;
pub mod reserved;
pub mod vmalloc;
pub mod xsave_area;

pub use api::{Mapping, VMMAdapter, VMPageFaultResolution, VirtualMapping, VMM};
pub use file_mapping::{VMFileMapping, VMFileMappingFlags};
Expand All @@ -20,4 +19,3 @@ pub use phys_mem::VMPhysMem;
pub use rawalloc::RawAllocMapping;
pub use reserved::VMReserved;
pub use vmalloc::VMalloc;
pub use xsave_area::XSaveArea;
46 changes: 0 additions & 46 deletions kernel/src/mm/vm/mapping/xsave_area.rs

This file was deleted.

2 changes: 1 addition & 1 deletion kernel/src/mm/vm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,6 @@ mod range;

pub use mapping::{
Mapping, RawAllocMapping, VMFileMapping, VMFileMappingFlags, VMKernelStack, VMMAdapter,
VMPhysMem, VMReserved, VMalloc, VirtualMapping, XSaveArea, VMM,
VMPhysMem, VMReserved, VMalloc, VirtualMapping, VMM,
};
pub use range::{VMRMapping, VMR, VMR_GRANULE};
29 changes: 10 additions & 19 deletions kernel/src/task/schedule.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ use super::INITIAL_TASK_ID;
use super::{Task, TaskListAdapter, TaskPointer, TaskRunListAdapter};
use crate::address::Address;
use crate::cpu::percpu::{irq_nesting_count, this_cpu};
use crate::cpu::sse::sse_restore_context;
use crate::cpu::sse::sse_save_context;
use crate::cpu::IrqGuard;
use crate::error::SvsmError;
use crate::locking::SpinLock;
Expand Down Expand Up @@ -305,7 +307,7 @@ unsafe fn switch_to(prev: *const Task, next: *const Task) {
"#,
in("rsi") prev as u64,
in("rdi") next as u64,
in("rcx") cr3,
in("rdx") cr3,
options(att_syntax));
}

Expand Down Expand Up @@ -353,16 +355,20 @@ pub fn schedule() {
unsafe {
let a = task_pointer(current);
let b = task_pointer(next);
sse_save_context(u64::from((*a).xsa.vaddr()));

// Switch tasks
switch_to(a, b);

// We're now in the context of the new task.
sse_restore_context(u64::from((*a).xsa.vaddr()));
}
}

drop(guard);

// We're now in the context of the new task. If the previous task had terminated
// then we can release it's reference here.
// If the previous task had terminated then we can release
// its reference here.
let _ = this_cpu().runqueue().lock_write().terminated_task.take();
}

Expand Down Expand Up @@ -399,33 +405,18 @@ global_asm!(
// Save the current stack pointer
testq %rsi, %rsi
jz 1f
//set bits in edx:eax that correspond to SSE/FPU context
movq $0x7, %rax
xorq %rdx, %rdx
//rsi + 8 contains xsave area address
movq 8(%rsi), %rbx
xsaveopt (%rbx)
movq %rsp, (%rsi)
1:
// Switch to the new task state
mov %rcx, %cr3
mov %rdx, %cr3
// Switch to the new task stack
movq (%rdi), %rsp
// We've already restored rsp
addq $8, %rsp
//set bits in edx:eax that correspond to SSE/FPU context
movq $0x7, %rax
xorq %rdx, %rdx
//rdi + 8 contains xsave area address
movq 8(%rdi), %rbx
xrstor (%rbx)
// Restore the task context
popq %r15
popq %r14
Expand Down
57 changes: 34 additions & 23 deletions kernel/src/task/tasks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,24 +10,25 @@ use alloc::collections::btree_map::BTreeMap;
use alloc::sync::Arc;
use core::fmt;
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::sync::atomic::{AtomicU32, Ordering};

use crate::address::{Address, VirtAddr};
use crate::cpu::idt::svsm::return_new_task;
use crate::cpu::msr::read_flags;
use crate::cpu::percpu::PerCpu;
use crate::cpu::sse::{get_xsave_area_size, sse_restore_context};
use crate::cpu::X86ExceptionContext;
use crate::cpu::{irqs_enable, X86GeneralRegs};
use crate::error::SvsmError;
use crate::fs::FileHandle;
use crate::locking::{RWLock, SpinLock};
use crate::mm::pagetable::{PTEntryFlags, PageTable};
use crate::mm::vm::{Mapping, VMFileMappingFlags, VMKernelStack, XSaveArea, VMR};
use crate::mm::vm::{Mapping, VMFileMappingFlags, VMKernelStack, VMR};
use crate::mm::PageBox;
use crate::mm::{
mappings::create_anon_mapping, mappings::create_file_mapping, VMMappingGuard,
SVSM_PERTASK_BASE, SVSM_PERTASK_END, SVSM_PERTASK_STACK_BASE, SVSM_PERTASK_XSAVE_AREA_BASE,
USER_MEM_END, USER_MEM_START,
SVSM_PERTASK_BASE, SVSM_PERTASK_END, SVSM_PERTASK_STACK_BASE, USER_MEM_END, USER_MEM_START,
};
use crate::syscall::{Obj, ObjError, ObjHandle};
use crate::types::{SVSM_USER_CS, SVSM_USER_DS};
Expand Down Expand Up @@ -122,8 +123,8 @@ impl TaskSchedState {
pub struct Task {
pub rsp: u64,

/// XSave area address for the task
pub xsa_addr: u64,
/// XSave area
pub xsa: PageBox<[u8]>,

pub stack_bounds: MemoryRegion<VirtAddr>,

Expand Down Expand Up @@ -188,12 +189,11 @@ impl Task {
let vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
vm_kernel_range.initialize()?;

let (stack, raw_bounds, rsp_offset) = Self::allocate_ktask_stack(cpu, entry)?;
let xsa = Self::allocate_xsave_area();
let xsa_addr = u64::from(xsa.vaddr()) as usize;
let (stack, raw_bounds, rsp_offset) = Self::allocate_ktask_stack(cpu, entry, xsa_addr)?;
vm_kernel_range.insert_at(SVSM_PERTASK_STACK_BASE, stack)?;

let xsa = Self::allocate_xsave_area()?;
vm_kernel_range.insert_at(SVSM_PERTASK_XSAVE_AREA_BASE, xsa)?;

vm_kernel_range.populate(&mut pgtable);

// Remap at the per-task offset
Expand All @@ -208,7 +208,7 @@ impl Task {
.checked_sub(rsp_offset)
.expect("Invalid stack offset from task::allocate_ktask_stack()")
.bits() as u64,
xsa_addr: SVSM_PERTASK_XSAVE_AREA_BASE.bits() as u64,
xsa,
stack_bounds: bounds,
page_table: SpinLock::new(pgtable),
vm_kernel_range,
Expand All @@ -233,12 +233,12 @@ impl Task {
let vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
vm_kernel_range.initialize()?;

let (stack, raw_bounds, stack_offset) = Self::allocate_utask_stack(cpu, user_entry)?;
let xsa = Self::allocate_xsave_area();
let xsa_addr = u64::from(xsa.vaddr()) as usize;
let (stack, raw_bounds, stack_offset) =
Self::allocate_utask_stack(cpu, user_entry, xsa_addr)?;
vm_kernel_range.insert_at(SVSM_PERTASK_STACK_BASE, stack)?;

let xsa = Self::allocate_xsave_area()?;
vm_kernel_range.insert_at(SVSM_PERTASK_XSAVE_AREA_BASE, xsa)?;

vm_kernel_range.populate(&mut pgtable);

let vm_user_range = VMR::new(USER_MEM_START, USER_MEM_END, PTEntryFlags::USER);
Expand All @@ -256,7 +256,7 @@ impl Task {
.checked_sub(stack_offset)
.expect("Invalid stack offset from task::allocate_utask_stack()")
.bits() as u64,
xsa_addr: SVSM_PERTASK_XSAVE_AREA_BASE.bits() as u64,
xsa,
stack_bounds: bounds,
page_table: SpinLock::new(pgtable),
vm_kernel_range,
Expand Down Expand Up @@ -350,6 +350,7 @@ impl Task {
fn allocate_ktask_stack(
cpu: &PerCpu,
entry: extern "C" fn(),
xsa_addr: usize,
) -> Result<(Arc<Mapping>, MemoryRegion<VirtAddr>, usize), SvsmError> {
let (mapping, bounds) = Task::allocate_stack_common()?;

Expand All @@ -369,6 +370,8 @@ impl Task {
(*task_context).flags = read_flags();
// ret_addr
(*task_context).regs.rdi = entry as *const () as usize;
// xsave area addr
(*task_context).regs.rsi = xsa_addr;
(*task_context).ret_addr = run_kernel_task as *const () as u64;
// Task termination handler for when entry point returns
stack_ptr.offset(-1).write(task_exit as *const () as u64);
Expand All @@ -380,6 +383,7 @@ impl Task {
fn allocate_utask_stack(
cpu: &PerCpu,
user_entry: usize,
xsa_addr: usize,
) -> Result<(Arc<Mapping>, MemoryRegion<VirtAddr>, usize), SvsmError> {
let (mapping, bounds) = Task::allocate_stack_common()?;

Expand Down Expand Up @@ -408,24 +412,30 @@ impl Task {

stack_offset += size_of::<TaskContext>();

let task_context = TaskContext {
let mut task_context = TaskContext {
ret_addr: VirtAddr::from(return_new_task as *const ())
.bits()
.try_into()
.unwrap(),
..Default::default()
};

// xsave area addr
task_context.regs.rdi = xsa_addr;
let stack_task_context = stack_ptr.sub(stack_offset).cast::<TaskContext>();
*stack_task_context = task_context;
}

Ok((mapping, bounds, stack_offset))
}

fn allocate_xsave_area() -> Result<Arc<Mapping>, SvsmError> {
let xsa = XSaveArea::new()?;
let mapping = Arc::new(Mapping::new(xsa));
Ok(mapping)
fn allocate_xsave_area() -> PageBox<[u8]> {
let len = get_xsave_area_size() as usize;
let xsa = PageBox::<[u8]>::try_new_slice(0u8, NonZeroUsize::new(len).unwrap());
if xsa.is_err() {
panic!("Error while allocating xsave area");
}
xsa.unwrap()
}

pub fn mmap_common(
Expand Down Expand Up @@ -594,7 +604,7 @@ pub fn is_task_fault(vaddr: VirtAddr) -> bool {
/// task. Any first-time initialization and setup work for a new task that
/// needs to happen in its context must be done here.
#[no_mangle]
fn setup_new_task() {
fn setup_new_task(xsa_addr: u64) {
// Re-enable IRQs here, as they are still disabled from the
// schedule()/sched_init() functions. After the context switch the IrqGuard
// from the previous task is not dropped, which causes IRQs to stay
Expand All @@ -607,11 +617,12 @@ fn setup_new_task() {
// schedule()/schedule_init(). See description above.
unsafe {
irqs_enable();
sse_restore_context(xsa_addr);
}
}

extern "C" fn run_kernel_task(entry: extern "C" fn()) {
setup_new_task();
/// C-ABI entry trampoline for a newly created kernel task.
///
/// `entry` is the task's entry function and `xsa_addr` the virtual
/// address of its XSAVE area; both are staged into the initial task
/// context by stack setup at task-creation time.
extern "C" fn run_kernel_task(entry: extern "C" fn(), xsa_addr: u64) {
    // Finish first-run initialization (IRQ enable, SSE/FPU context
    // restore) in the new task's own context, then run the task body.
    setup_new_task(xsa_addr);
    entry();
}

Expand Down

0 comments on commit 35a3107

Please sign in to comment.