Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Aem boot delay fix #17

Open
wants to merge 8 commits into
base: aem-4.17.4
Choose a base branch
from
2 changes: 1 addition & 1 deletion xen/arch/x86/boot/head.S
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ mle_header:
.long 0x00000000 /* First valid page of MLE */
.long 0x00000000 /* Offset within binary of first byte of MLE */
.long (_end - start) /* Offset within binary of last byte + 1 of MLE */
.long 0x00000223 /* Bit vector of MLE-supported capabilities */
.long 0x00000723 /* Bit vector of MLE-supported capabilities */
.long 0x00000000 /* Starting linear address of command line (unused) */
.long 0x00000000 /* Ending linear address of command line (unused) */

Expand Down
44 changes: 44 additions & 0 deletions xen/arch/x86/cpu/intel.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include <asm/i387.h>
#include <mach_apic.h>
#include <asm/hvm/support.h>
#include <asm/intel_txt.h>

#include "cpu.h"

Expand Down Expand Up @@ -525,6 +526,47 @@ static void intel_log_freq(const struct cpuinfo_x86 *c)
printk("%u MHz\n", (factor * max_ratio + 50) / 100);
}

/*
 * Print out the SMX and TXT capabilities, so that dom0 can determine if system
 * is DRTM capable
 */
static void intel_log_smx_txt(struct cpuinfo_x86 *c)
{
    unsigned long cr4_val, getsec_caps;

    /* Run only on BSP to report the SMX/TXT caps only once */
    if (smp_processor_id())
        return;

    printk("CPU: SMX capability ");
    if (!test_bit(X86_FEATURE_SMX, &boot_cpu_data.x86_capability)) {
        printk("not supported\n");
        return;
    }
    printk("supported\n");

    /* Can't run GETSEC without VMX and SMX */
    if (!test_bit(X86_FEATURE_VMX, &boot_cpu_data.x86_capability))
        return;

    /* GETSEC requires CR4.SMXE; enable it temporarily if it was clear. */
    cr4_val = read_cr4();
    if (!(cr4_val & X86_CR4_SMXE))
        write_cr4(cr4_val | X86_CR4_SMXE);

    /* GETSEC[CAPABILITIES]: EAX selects the leaf, EBX the capability index. */
    asm volatile ("getsec\n"
                  : "=a" (getsec_caps)
                  : "a" (GETSEC_CAPABILITIES), "b" (0) :);

    /* GETSEC_CAP_TXT_CHIPSET (bit 0) reports a TXT-capable chipset. */
    if (getsec_caps & GETSEC_CAP_TXT_CHIPSET)
        printk("Chipset supports TXT\n");
    else
        printk("Chipset does not support TXT\n");

    /* Put CR4.SMXE back the way we found it. */
    if (!(cr4_val & X86_CR4_SMXE))
        write_cr4(cr4_val & ~X86_CR4_SMXE);

}

static void cf_check init_intel(struct cpuinfo_x86 *c)
{
/* Detect the extended topology information if available */
Expand Down Expand Up @@ -565,6 +607,8 @@ static void cf_check init_intel(struct cpuinfo_x86 *c)
detect_ht(c);
}

intel_log_smx_txt(c);

/* Work around errata */
Intel_errata_workarounds(c);

Expand Down
6 changes: 3 additions & 3 deletions xen/arch/x86/e820.c
Original file line number Diff line number Diff line change
Expand Up @@ -454,12 +454,12 @@ static uint64_t __init mtrr_top_of_ram(void)
ASSERT(paddr_bits);
addr_mask = ((1ull << paddr_bits) - 1) & PAGE_MASK;

rdmsrl(MSR_MTRRcap, mtrr_cap);
rdmsrl(MSR_MTRRdefType, mtrr_def);

if ( slaunch_active && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
txt_restore_mtrrs(e820_verbose);

rdmsrl(MSR_MTRRcap, mtrr_cap);
rdmsrl(MSR_MTRRdefType, mtrr_def);

if ( e820_verbose )
printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def);

Expand Down
3 changes: 2 additions & 1 deletion xen/arch/x86/hvm/vmx/vmcs.c
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
#include <asm/spec_ctrl.h>
#include <asm/tboot.h>
#include <asm/apic.h>
#include <asm/slaunch.h>

static bool_t __read_mostly opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);
Expand Down Expand Up @@ -758,7 +759,7 @@ static int _vmx_cpu_up(bool bsp)
bios_locked = !!(eax & IA32_FEATURE_CONTROL_LOCK);
if ( bios_locked )
{
if ( !(eax & (tboot_in_measured_env()
if ( !(eax & (tboot_in_measured_env() || slaunch_active
? IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX
: IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX)) )
{
Expand Down
5 changes: 5 additions & 0 deletions xen/arch/x86/include/asm/intel_txt.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,11 @@
#define TXT_AP_BOOT_CS 0x0030
#define TXT_AP_BOOT_DS 0x0038

/* EAX value for GETSEC leaf functions. Intel SDM: GETSEC[CAPABILITIES] */
#define GETSEC_CAPABILITIES 0
/* Intel SDM: GETSEC Capability Result Encoding */
#define GETSEC_CAP_TXT_CHIPSET 1

#ifndef __ASSEMBLY__

extern char txt_ap_entry[];
Expand Down
85 changes: 83 additions & 2 deletions xen/arch/x86/intel_txt.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,12 @@
#include <asm/e820.h>
#include <xen/string.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/invpcid.h>
#include <asm/processor.h>
#include <asm/intel_txt.h>
#include <asm/slaunch.h>
#include <asm/system.h>
#include <asm/tpm.h>
#include <xen/init.h>
#include <xen/mm.h>
Expand Down Expand Up @@ -58,6 +62,63 @@ void __init protect_txt_mem_regions(void)
BUG_ON(rc == 0);
}

/* Serializes the disable_mtrrs()/enable_mtrrs() critical section. */
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * MTRRdefType MSR value to be written back by enable_mtrrs(); saved here by
 * disable_mtrrs(), but may be overwritten by the caller in between.
 */
static uint64_t deftype = 0;

/*
 * Prepare this CPU for an MTRR update: enter no-fill cache mode, flush
 * caches and TLB, and disable MTRRs (default type uncached).  Leaves
 * set_atomicity_lock held; enable_mtrrs() undoes all of this.
 *
 * Returns whether CR4.PGE was set on entry, to be passed to enable_mtrrs().
 */
static bool disable_mtrrs(void)
{
    unsigned long cr4;

    /*
     * Note that this is not ideal, since the cache is only flushed/disabled
     * for this CPU while the MTRRs are changed, but changing this requires
     * more invasive changes to the way the kernel boots
     */

    spin_lock(&set_atomicity_lock);

    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    write_cr0(read_cr0() | X86_CR0_CD);

    /* Flush the caches */
    wbinvd();

    /* Flush the TLB, including global entries (toggling PGE does that). */
    cr4 = read_cr4();
    if (cr4 & X86_CR4_PGE)
        write_cr4(cr4 & ~X86_CR4_PGE);
    else if (use_invpcid)
        invpcid_flush_all();
    else
        write_cr3(read_cr3());

    /* Disable MTRRs, and set the default type to uncached */
    rdmsrl(MSR_MTRRdefType, deftype);
    /* ~0xcff clears E (bit 11), FE (bit 10) and the default type (bits 7:0). */
    wrmsrl(MSR_MTRRdefType, deftype & ~0xcff);

    /* Again, flush caches */
    wbinvd();

    return cr4 & X86_CR4_PGE;
}

/*
 * Counterpart of disable_mtrrs(): write back whatever is currently in
 * 'deftype', leave no-fill cache mode, flush the TLB (restoring CR4.PGE if
 * @pge), and drop set_atomicity_lock.
 */
static void enable_mtrrs(bool pge)
{
    /* Intel (P6) standard MTRRs */
    wrmsrl(MSR_MTRRdefType, deftype);

    /* Enable caches */
    write_cr0(read_cr0() & ~X86_CR0_CD);

    /* Reenable CR4.PGE (also flushes the TLB) */
    if (pge)
        write_cr4(read_cr4() | X86_CR4_PGE);
    else if (use_invpcid)
        invpcid_flush_all();
    else
        write_cr3(read_cr3());

    spin_unlock(&set_atomicity_lock);
}

void __init txt_restore_mtrrs(bool e820_verbose)
{
struct txt_os_mle_data *os_mle;
Expand All @@ -66,6 +127,7 @@ void __init txt_restore_mtrrs(bool e820_verbose)
int os_mle_size;
uint64_t mtrr_cap, mtrr_def, base, mask;
unsigned int i;
bool pge;

os_mle_size = txt_os_mle_data_size(__va(txt_heap_base));
os_mle = txt_os_mle_data_start(__va(txt_heap_base));
Expand Down Expand Up @@ -102,8 +164,7 @@ void __init txt_restore_mtrrs(bool e820_verbose)
intel_info->saved_bsp_mtrrs.mtrr_vcnt : mtrr_cap;
}

/* Restore MTRRs saved by bootloader. */
wrmsrl(MSR_MTRRdefType, intel_info->saved_bsp_mtrrs.default_mem_type);
pge = disable_mtrrs();

for ( i = 0; i < (uint8_t)mtrr_cap; i++ )
{
Expand All @@ -113,6 +174,26 @@ void __init txt_restore_mtrrs(bool e820_verbose)
wrmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask);
}

deftype = intel_info->saved_bsp_mtrrs.default_mem_type;
enable_mtrrs(pge);

if ( e820_verbose )
{
printk("Restored MTRRs:\n"); /* Printed by caller, mtrr_top_of_ram(). */

/* If MTRRs are not enabled or WB is not a default type, MTRRs won't be printed */
if ( !test_bit(11, &deftype) || ((uint8_t)deftype == X86_MT_WB) )
{
for ( i = 0; i < (uint8_t)mtrr_cap; i++ )
{
rdmsrl(MSR_IA32_MTRR_PHYSBASE(i), base);
rdmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask);
printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n",
i, base, mask);
}
}
}

/* Restore IA32_MISC_ENABLES */
wrmsrl(MSR_IA32_MISC_ENABLE, intel_info->saved_misc_enable_msr);
}
8 changes: 4 additions & 4 deletions xen/arch/x86/slaunch.c
Original file line number Diff line number Diff line change
Expand Up @@ -61,10 +61,6 @@ void __init map_slaunch_mem_regions(void)

map_l2(TPM_TIS_BASE, TPM_TIS_SIZE);

find_evt_log(__va(slaunch_slrt), &evt_log_addr, &evt_log_size);
if ( evt_log_addr != NULL )
map_l2((unsigned long)evt_log_addr, evt_log_size);

/* Vendor-specific part. */
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
{
Expand All @@ -74,6 +70,10 @@ void __init map_slaunch_mem_regions(void)
{
map_l2(get_slb_start(), SKINIT_SLB_SIZE);
}

find_evt_log(__va(slaunch_slrt), &evt_log_addr, &evt_log_size);
if ( evt_log_addr != NULL )
map_l2((unsigned long)evt_log_addr, evt_log_size);
}

void __init protect_slaunch_mem_regions(void)
Expand Down