diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index 093f894850..ab33c7826c 100644
--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -128,7 +128,7 @@ mle_header:
         .long 0x00000000 /* First valid page of MLE */
         .long 0x00000000 /* Offset within binary of first byte of MLE */
         .long (_end - start) /* Offset within binary of last byte + 1 of MLE */
-        .long 0x00000223 /* Bit vector of MLE-supported capabilities */
+        .long 0x00000723 /* Bit vector of MLE-supported capabilities */
         .long 0x00000000 /* Starting linear address of command line (unused) */
         .long 0x00000000 /* Ending linear address of command line (unused) */
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 2d439e0bd2..f11eb8d372 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <asm/intel_txt.h>

 #include "cpu.h"

@@ -525,6 +526,47 @@ static void intel_log_freq(const struct cpuinfo_x86 *c)
     printk("%u MHz\n", (factor * max_ratio + 50) / 100);
 }

+/*
+ * Print the SMX and TXT capabilities, so that dom0 can determine whether the
+ * system is DRTM-capable.
+ */
+static void intel_log_smx_txt(struct cpuinfo_x86 *c)
+{
+    unsigned long cr4_val, getsec_caps;
+
+    /* Run only on the BSP, to report the SMX/TXT capabilities just once */
+    if (smp_processor_id())
+        return;
+
+    printk("CPU: SMX capability ");
+    if (!test_bit(X86_FEATURE_SMX, &boot_cpu_data.x86_capability)) {
+        printk("not supported\n");
+        return;
+    }
+    printk("supported\n");
+
+    /* Can't run GETSEC without VMX and SMX */
+    if (!test_bit(X86_FEATURE_VMX, &boot_cpu_data.x86_capability))
+        return;
+
+    cr4_val = read_cr4();
+    if (!(cr4_val & X86_CR4_SMXE))
+        write_cr4(cr4_val | X86_CR4_SMXE);
+
+    asm volatile ("getsec"
+                  : "=a" (getsec_caps)
+                  : "a" (GETSEC_CAPABILITIES), "b" (0));
+
+    if (getsec_caps & GETSEC_CAP_TXT_CHIPSET)
+        printk("Chipset supports TXT\n");
+    else
+        printk("Chipset does not support TXT\n");
+
+    /* Restore CR4.SMXE if we had to set it above */
+    if (!(cr4_val & X86_CR4_SMXE))
+        write_cr4(cr4_val & ~X86_CR4_SMXE);
+}
+
 static void cf_check init_intel(struct cpuinfo_x86 *c)
 {
     /* Detect the extended topology information if available */
@@ -565,6 +607,8 @@ static void cf_check init_intel(struct cpuinfo_x86 *c)
         detect_ht(c);
     }

+    intel_log_smx_txt(c);
+
     /* Work around errata */
     Intel_errata_workarounds(c);
diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index 896921b1c5..cbeaf9920d 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -454,12 +454,12 @@ static uint64_t __init mtrr_top_of_ram(void)
     ASSERT(paddr_bits);
     addr_mask = ((1ull << paddr_bits) - 1) & PAGE_MASK;

-    rdmsrl(MSR_MTRRcap, mtrr_cap);
-    rdmsrl(MSR_MTRRdefType, mtrr_def);
-
     if ( slaunch_active && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
         txt_restore_mtrrs(e820_verbose);

+    rdmsrl(MSR_MTRRcap, mtrr_cap);
+    rdmsrl(MSR_MTRRdefType, mtrr_def);
+
     if ( e820_verbose )
         printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 796a7055b7..444c7fbd93 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include <asm/slaunch.h>

 static bool_t __read_mostly opt_vpid_enabled = 1;
 boolean_param("vpid", opt_vpid_enabled);
@@ -758,7 +759,7 @@ static int _vmx_cpu_up(bool bsp)
     bios_locked = !!(eax & IA32_FEATURE_CONTROL_LOCK);
     if ( bios_locked )
     {
-        if ( !(eax & (tboot_in_measured_env()
+        if ( !(eax & (tboot_in_measured_env() || slaunch_active
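For context on the vmcs.c hunk: when firmware locks IA32_FEATURE_CONTROL, VMXON is only permitted if the matching enable bit was set before the lock, and a measured-launch environment (tboot or, with this patch, Secure Launch) runs inside SMX, so the "inside SMX" enable bit is the one that matters. A minimal stand-alone sketch of that check, with bit positions taken from the Intel SDM description of IA32_FEATURE_CONTROL (MSR 0x3a); the helper name is illustrative, not Xen's:

#include <stdbool.h>
#include <stdint.h>

#define FEATURE_CONTROL_LOCK               (1u << 0)  /* set by firmware */
#define FEATURE_CONTROL_VMXON_INSIDE_SMX   (1u << 1)
#define FEATURE_CONTROL_VMXON_OUTSIDE_SMX  (1u << 2)

/* Returns true if VMXON can proceed, given the MSR value read at boot. */
static bool vmxon_allowed(uint32_t feat_ctl, bool measured_env)
{
    uint32_t needed = measured_env ? FEATURE_CONTROL_VMXON_INSIDE_SMX
                                   : FEATURE_CONTROL_VMXON_OUTSIDE_SMX;

    if ( feat_ctl & FEATURE_CONTROL_LOCK )
        return feat_ctl & needed;   /* locked: the bit must already be set */

    return true;                    /* unlocked: Xen can set the bit itself */
}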
                      ? IA32_FEATURE_CONTROL_ENABLE_VMXON_INSIDE_SMX
                      : IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX)) )
         {
diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h
index e6c6a06be1..00903ed88e 100644
--- a/xen/arch/x86/include/asm/intel_txt.h
+++ b/xen/arch/x86/include/asm/intel_txt.h
@@ -78,6 +78,11 @@
 #define TXT_AP_BOOT_CS 0x0030
 #define TXT_AP_BOOT_DS 0x0038

+/* EAX value selecting the GETSEC leaf function. Intel SDM: GETSEC[CAPABILITIES] */
+#define GETSEC_CAPABILITIES 0
+/* Intel SDM: GETSEC Capability Result Encoding */
+#define GETSEC_CAP_TXT_CHIPSET 1
+
 #ifndef __ASSEMBLY__

 extern char txt_ap_entry[];
diff --git a/xen/arch/x86/intel_txt.c b/xen/arch/x86/intel_txt.c
index cc9a6d01b0..4aca4141ea 100644
--- a/xen/arch/x86/intel_txt.c
+++ b/xen/arch/x86/intel_txt.c
@@ -4,8 +4,12 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -58,6 +62,63 @@ void __init protect_txt_mem_regions(void)
     BUG_ON(rc == 0);
 }

+static DEFINE_SPINLOCK(set_atomicity_lock);
+
+static uint64_t deftype;
+
+static bool disable_mtrrs(void)
+{
+    unsigned long cr4;
+
+    /*
+     * Note that this is not ideal: the cache is only flushed/disabled for
+     * this CPU while the MTRRs are changed, but changing that would require
+     * more invasive changes to the way Xen boots.
+     */
+    spin_lock(&set_atomicity_lock);
+
+    /* Enter the no-fill (CD=1, NW=0) cache mode. */
+    write_cr0(read_cr0() | X86_CR0_CD);
+
+    /* Flush the caches */
+    wbinvd();
+
+    /* Flush the TLB */
+    cr4 = read_cr4();
+    if ( cr4 & X86_CR4_PGE )
+        write_cr4(cr4 & ~X86_CR4_PGE);
+    else if ( use_invpcid )
+        invpcid_flush_all();
+    else
+        write_cr3(read_cr3());
+
+    /* Disable MTRRs, and set the default type to uncached */
+    rdmsrl(MSR_MTRRdefType, deftype);
+    wrmsrl(MSR_MTRRdefType, deftype & ~0xcff);
+
+    /* Again, flush caches */
+    wbinvd();
+
+    return cr4 & X86_CR4_PGE;
+}
+
+static void enable_mtrrs(bool pge)
+{
+    /* Intel (P6) standard MTRRs */
+    wrmsrl(MSR_MTRRdefType, deftype);
+
+    /* Enable caches */
+    write_cr0(read_cr0() & ~X86_CR0_CD);
+
+    /* Re-enable CR4.PGE (this also flushes the TLB) */
+    if ( pge )
+        write_cr4(read_cr4() | X86_CR4_PGE);
+    else if ( use_invpcid )
+        invpcid_flush_all();
+    else
+        write_cr3(read_cr3());
+
+    spin_unlock(&set_atomicity_lock);
+}
+
 void __init txt_restore_mtrrs(bool e820_verbose)
 {
     struct txt_os_mle_data *os_mle;
@@ -66,6 +127,7 @@ void __init txt_restore_mtrrs(bool e820_verbose)
     int os_mle_size;
     uint64_t mtrr_cap, mtrr_def, base, mask;
     unsigned int i;
+    bool pge;

     os_mle_size = txt_os_mle_data_size(__va(txt_heap_base));
     os_mle = txt_os_mle_data_start(__va(txt_heap_base));
@@ -102,8 +164,7 @@
                       intel_info->saved_bsp_mtrrs.mtrr_vcnt : mtrr_cap;
     }

-    /* Restore MTRRs saved by bootloader. */
-    wrmsrl(MSR_MTRRdefType, intel_info->saved_bsp_mtrrs.default_mem_type);
+    pge = disable_mtrrs();

     for ( i = 0; i < (uint8_t)mtrr_cap; i++ )
     {
@@ -113,6 +174,26 @@
         wrmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask);
     }

+    deftype = intel_info->saved_bsp_mtrrs.default_mem_type;
+    enable_mtrrs(pge);
+
+    if ( e820_verbose )
+    {
+        /* The MTRR values themselves are printed by the caller, mtrr_top_of_ram(). */
+        printk("Restored MTRRs:\n");
+
+        /*
+         * If MTRRs are disabled or the default type is WB, the caller won't
+         * print them, so print them here instead.
+         */
+        if ( !test_bit(11, &deftype) || ((uint8_t)deftype == X86_MT_WB) )
+        {
+            for ( i = 0; i < (uint8_t)mtrr_cap; i++ )
+            {
+                rdmsrl(MSR_IA32_MTRR_PHYSBASE(i), base);
+                rdmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask);
+                printk(" MTRR[%u]: base %"PRIx64" mask %"PRIx64"\n",
+                       i, base, mask);
+            }
+        }
+    }
+
+    /* Restore IA32_MISC_ENABLE */
+    wrmsrl(MSR_IA32_MISC_ENABLE, intel_info->saved_misc_enable_msr);
 }
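On the MTRR code above: MSR_MTRRdefType packs three fields (bits 7:0 default memory type, bit 10 FE fixed-range enable, bit 11 E MTRR enable), so "deftype & ~0xcff" clears both enable bits and forces the default type to UC in a single write, and the later test_bit(11, ...) and (uint8_t)deftype checks read those same fields back. A small stand-alone decoder for that layout (field masks per the Intel SDM; X86_MT_WB mirrors Xen's value of 6):

#include <stdint.h>
#include <stdio.h>

#define MTRR_DEF_TYPE_MASK 0xffu        /* bits 7:0: default memory type */
#define MTRR_DEF_FE        (1u << 10)   /* fixed-range MTRRs enabled */
#define MTRR_DEF_E         (1u << 11)   /* MTRRs enabled */
#define X86_MT_WB          0x06u        /* write-back */

static void decode_mtrr_def_type(uint64_t val)
{
    printf("MTRRs %sabled, fixed ranges %sabled, default type %#x%s\n",
           (val & MTRR_DEF_E) ? "en" : "dis",
           (val & MTRR_DEF_FE) ? "en" : "dis",
           (unsigned int)(val & MTRR_DEF_TYPE_MASK),
           ((val & MTRR_DEF_TYPE_MASK) == X86_MT_WB) ? " (WB)" : "");
}

int main(void)
{
    decode_mtrr_def_type(0xc06);            /* E + FE set, default type WB */
    decode_mtrr_def_type(0xc06 & ~0xcff);   /* after disable_mtrrs(): all off, UC */
    return 0;
}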
diff --git a/xen/arch/x86/slaunch.c b/xen/arch/x86/slaunch.c
index b18b882f74..00e67132d4 100644
--- a/xen/arch/x86/slaunch.c
+++ b/xen/arch/x86/slaunch.c
@@ -61,10 +61,6 @@ void __init map_slaunch_mem_regions(void)

     map_l2(TPM_TIS_BASE, TPM_TIS_SIZE);

-    find_evt_log(__va(slaunch_slrt), &evt_log_addr, &evt_log_size);
-    if ( evt_log_addr != NULL )
-        map_l2((unsigned long)evt_log_addr, evt_log_size);
-
     /* Vendor-specific part. */
     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
     {
@@ -74,6 +70,10 @@
     {
         map_l2(get_slb_start(), SKINIT_SLB_SIZE);
     }
+
+    find_evt_log(__va(slaunch_slrt), &evt_log_addr, &evt_log_size);
+    if ( evt_log_addr != NULL )
+        map_l2((unsigned long)evt_log_addr, evt_log_size);
 }

 void __init protect_slaunch_mem_regions(void)
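For reference when reading the "Restored MTRRs:" base/mask lines added in txt_restore_mtrrs(): each variable-range MTRR is a PHYSBASE/PHYSMASK pair, where PHYSBASE bits 7:0 hold the memory type, PHYSMASK bit 11 is the valid bit, and a physical address A falls inside the range when (A & mask) == (base & mask). A stand-alone sketch of that test (paddr_bits stands in for the CPU's physical address width, as in e820.c; the example pair is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MTRR_PHYSMASK_VALID (1ull << 11)

static bool mtrr_covers(uint64_t physbase, uint64_t physmask, uint64_t addr,
                        unsigned int paddr_bits)
{
    uint64_t addr_mask = ((1ull << paddr_bits) - 1) & ~0xfffull;
    uint64_t base = physbase & addr_mask;
    uint64_t mask = physmask & addr_mask;

    if ( !(physmask & MTRR_PHYSMASK_VALID) )
        return false;

    return (addr & mask) == (base & mask);
}

int main(void)
{
    /* Hypothetical pair: a 1 GiB WB range at 0 on a CPU with 36 address bits. */
    printf("covered: %d\n",
           mtrr_covers(0x6, 0xfc0000800ull, 0x3fffffffull, 36));
    return 0;
}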