
[Xen-devel] [PATCH RFC v1 02/74] x86: Common cpuid faulting support



From: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

With CPUID Faulting now offered to SVM guests, move Xen's faulting code from
being Intel-specific to common.

This is necessary for nested Xen (inc. pv-shim mode) to prevent PV guests from
finding the outer HVM Xen leaves via native cpuid.
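
For reference (not part of this patch): a minimal sketch of the architectural
protocol behind CPUID faulting.  MSR indices and bits are per the Intel SDM,
and rdmsr_safe()/rdmsrl()/wrmsrl() are Xen's existing accessors.  Once the
enable bit is set, any CPUID executed at CPL > 0 raises #GP(0), which the
hypervisor intercepts and emulates from the guest's CPUID policy:

    #define MSR_INTEL_PLATFORM_INFO             0x000000ce
    #define  MSR_PLATFORM_INFO_CPUID_FAULTING       (1u << 31)
    #define MSR_INTEL_MISC_FEATURES_ENABLES     0x00000140
    #define  MSR_MISC_FEATURES_CPUID_FAULTING       (1u <<  0)

    /* Is CPUID faulting advertised?  The MSR may not exist, hence _safe. */
    static bool cpuid_faulting_available(void)
    {
        uint64_t val;

        return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val) &&
               (val & MSR_PLATFORM_INFO_CPUID_FAULTING);
    }

    /* Enable faulting: CPUID at CPL > 0 now raises #GP(0). */
    static void cpuid_faulting_enable(void)
    {
        uint64_t val;

        rdmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
        wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES,
               val | MSR_MISC_FEATURES_CPUID_FAULTING);
    }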

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
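[Note, not part of the patch: the resulting vendor init flow, condensed from
the hunks below.  The vendor_* names are placeholders for the amd_ and intel_
variants.]

    static void __init noinline vendor_init_levelling(void)
    {
        /* Common code: prefer architectural CPUID faulting when present... */
        if (probe_cpuid_faulting())
            return;

        /* ...else fall back to the vendor's CPUID masking MSRs. */
        probe_masking_msrs();

        if (levelling_caps)
            ctxt_switch_masking = vendor_ctxt_switch_masking;
    }

At context switch, the common ctxt_switch_levelling() makes the same choice:
use faulting if available, otherwise call the registered ctxt_switch_masking
hook.
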
 xen/arch/x86/cpu/amd.c          | 16 +++++---
 xen/arch/x86/cpu/common.c       | 76 ++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/cpu/intel.c        | 81 +++++++----------------------------------
 xen/include/asm-x86/cpuid.h     |  3 --
 xen/include/asm-x86/processor.h |  4 +-
 5 files changed, 98 insertions(+), 82 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 5f36ac75a7..2bff3ee377 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -198,11 +198,12 @@ static void __init noinline probe_masking_msrs(void)
 }
 
 /*
- * Context switch levelling state to the next domain.  A parameter of NULL is
- * used to context switch to the default host state (by the cpu bringup-code,
- * crash path, etc).
+ * Context switch CPUID masking state to the next domain.  Only called if
+ * CPUID Faulting isn't available, but masking MSRs have been detected.  A
+ * parameter of NULL is used to context switch to the default host state (by
+ * the cpu bringup-code, crash path, etc).
  */
-static void amd_ctxt_switch_levelling(const struct vcpu *next)
+static void amd_ctxt_switch_masking(const struct vcpu *next)
 {
        struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
        const struct domain *nextd = next ? next->domain : NULL;
@@ -263,6 +264,9 @@ static void __init noinline amd_init_levelling(void)
 {
        const struct cpuidmask *m = NULL;
 
+       if (probe_cpuid_faulting())
+               return;
+
        probe_masking_msrs();
 
        if (*opt_famrev != '\0') {
@@ -352,7 +356,7 @@ static void __init noinline amd_init_levelling(void)
        }
 
        if (levelling_caps)
-               ctxt_switch_levelling = amd_ctxt_switch_levelling;
+               ctxt_switch_masking = amd_ctxt_switch_masking;
 }
 
 /*
@@ -518,7 +522,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
        if (c == &boot_cpu_data)
                amd_init_levelling();
 
-       amd_ctxt_switch_levelling(NULL);
+       ctxt_switch_levelling(NULL);
 }
 
 static void init_amd(struct cpuinfo_x86 *c)
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index e9588b3c0d..a1f1a04776 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -113,12 +113,80 @@ static const struct cpu_dev default_cpu = {
 };
 static const struct cpu_dev *this_cpu = &default_cpu;
 
-static void default_ctxt_switch_levelling(const struct vcpu *next)
+static DEFINE_PER_CPU(uint64_t, msr_misc_features);
+void (* __read_mostly ctxt_switch_masking)(const struct vcpu *next);
+
+bool __init probe_cpuid_faulting(void)
+{
+       uint64_t val;
+
+       if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val) ||
+           !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
+           rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
+                      this_cpu(msr_misc_features)))
+       {
+               setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING);
+               return false;
+       }
+
+       expected_levelling_cap |= LCAP_faulting;
+       levelling_caps |=  LCAP_faulting;
+       setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
+
+       return true;
+}
+
+static void set_cpuid_faulting(bool enable)
+{
+       uint64_t *this_misc_features = &this_cpu(msr_misc_features);
+       uint64_t val = *this_misc_features;
+
+       if (!!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable)
+               return;
+
+       val ^= MSR_MISC_FEATURES_CPUID_FAULTING;
+
+       wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
+       *this_misc_features = val;
+}
+
+void ctxt_switch_levelling(const struct vcpu *next)
 {
-       /* Nop */
+       const struct domain *nextd = next ? next->domain : NULL;
+
+       if (cpu_has_cpuid_faulting) {
+               /*
+                * No need to alter the faulting setting if we are switching
+                * to idle; it won't affect any code running in idle context.
+                */
+               if (nextd && is_idle_domain(nextd))
+                       return;
+               /*
+                * We *should* be enabling faulting for the control domain.
+                *
+                * Unfortunately, the domain builder (having only ever been a
+                * PV guest) expects to be able to see host cpuid state in a
+                * native CPUID instruction, to correctly build a CPUID policy
+                * for HVM guests (notably the xstate leaves).
+                *
+                * This logic is fundamentally broken for HVM toolstack
+                * domains, and faulting causes PV guests to behave like HVM
+                * guests from their point of view.
+                *
+                * Future development plans will move responsibility for
+                * generating the maximum full cpuid policy into Xen, at which
+                * point this problem will disappear.
+                */
+               set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
+                                  (is_pv_domain(nextd) ||
+                                   next->arch.msr->
+                                   misc_features_enables.cpuid_faulting));
+               return;
+       }
+
+       if (ctxt_switch_masking)
+               ctxt_switch_masking(next);
 }
-void (* __read_mostly ctxt_switch_levelling)(const struct vcpu *next) =
-       default_ctxt_switch_levelling;
 
 bool_t opt_cpu_info;
 boolean_param("cpuinfo", opt_cpu_info);
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 8311952f1f..0888f76161 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -15,40 +15,6 @@
 
 #include "cpu.h"
 
-static bool __init probe_intel_cpuid_faulting(void)
-{
-       uint64_t x;
-
-       if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
-           !(x & MSR_PLATFORM_INFO_CPUID_FAULTING))
-               return 0;
-
-       expected_levelling_cap |= LCAP_faulting;
-       levelling_caps |=  LCAP_faulting;
-       setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
-       return 1;
-}
-
-DEFINE_PER_CPU(bool, cpuid_faulting_enabled);
-
-static void set_cpuid_faulting(bool enable)
-{
-       bool *this_enabled = &this_cpu(cpuid_faulting_enabled);
-       uint32_t hi, lo;
-
-       ASSERT(cpu_has_cpuid_faulting);
-
-       if (*this_enabled == enable)
-               return;
-
-       rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
-       lo &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
-       if (enable)
-               lo |= MSR_MISC_FEATURES_CPUID_FAULTING;
-       wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
-
-       *this_enabled = enable;
-}
 
 /*
  * Set caps in expected_levelling_cap, probe a specific masking MSR, and set
@@ -145,40 +111,17 @@ static void __init probe_masking_msrs(void)
 }
 
 /*
- * Context switch levelling state to the next domain.  A parameter of NULL is
- * used to context switch to the default host state (by the cpu bringup-code,
- * crash path, etc).
+ * Context switch CPUID masking state to the next domain.  Only called if
+ * CPUID Faulting isn't available, but masking MSRs have been detected.  A
+ * parameter of NULL is used to context switch to the default host state (by
+ * the cpu bringup-code, crash path, etc).
  */
-static void intel_ctxt_switch_levelling(const struct vcpu *next)
+static void intel_ctxt_switch_masking(const struct vcpu *next)
 {
        struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
        const struct domain *nextd = next ? next->domain : NULL;
-       const struct cpuidmasks *masks;
-
-       if (cpu_has_cpuid_faulting) {
-               /*
-                * We *should* be enabling faulting for the control domain.
-                *
-                * Unfortunately, the domain builder (having only ever been a
-                * PV guest) expects to be able to see host cpuid state in a
-                * native CPUID instruction, to correctly build a CPUID policy
-                * for HVM guests (notably the xstate leaves).
-                *
-                * This logic is fundimentally broken for HVM toolstack
-                * domains, and faulting causes PV guests to behave like HVM
-                * guests from their point of view.
-                *
-                * Future development plans will move responsibility for
-                * generating the maximum full cpuid policy into Xen, at which
-                * this problem will disappear.
-                */
-               set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
-                                  (is_pv_domain(nextd) ||
-                           next->arch.msr->misc_features_enables.cpuid_faulting));
-               return;
-       }
-
-       masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+       const struct cpuidmasks *masks =
+               (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
                ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
 
         if (msr_basic) {
@@ -223,8 +166,10 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
  */
 static void __init noinline intel_init_levelling(void)
 {
-       if (!probe_intel_cpuid_faulting())
-               probe_masking_msrs();
+       if (probe_cpuid_faulting())
+               return;
+
+       probe_masking_msrs();
 
        if (msr_basic) {
                uint32_t ecx, edx, tmp;
@@ -278,7 +223,7 @@ static void __init noinline intel_init_levelling(void)
        }
 
        if (levelling_caps)
-               ctxt_switch_levelling = intel_ctxt_switch_levelling;
+               ctxt_switch_masking = intel_ctxt_switch_masking;
 }
 
 static void early_init_intel(struct cpuinfo_x86 *c)
@@ -316,7 +261,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
        if (c == &boot_cpu_data)
                intel_init_levelling();
 
-       intel_ctxt_switch_levelling(NULL);
+       ctxt_switch_levelling(NULL);
 }
 
 /*
diff --git a/xen/include/asm-x86/cpuid.h b/xen/include/asm-x86/cpuid.h
index d2dd841e15..74d6f123e5 100644
--- a/xen/include/asm-x86/cpuid.h
+++ b/xen/include/asm-x86/cpuid.h
@@ -58,9 +58,6 @@ DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks);
 /* Default masking MSR values, calculated at boot. */
 extern struct cpuidmasks cpuidmask_defaults;
 
-/* Whether or not cpuid faulting is available for the current domain. */
-DECLARE_PER_CPU(bool, cpuid_faulting_enabled);
-
 #define CPUID_GUEST_NR_BASIC      (0xdu + 1)
 #define CPUID_GUEST_NR_FEAT       (0u + 1)
 #define CPUID_GUEST_NR_CACHE      (5u + 1)
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 41a8d8c32f..c9601b2fb2 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -151,7 +151,9 @@ extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data cpu_data[smp_processor_id()]
 
-extern void (*ctxt_switch_levelling)(const struct vcpu *next);
+extern bool probe_cpuid_faulting(void);
+extern void ctxt_switch_levelling(const struct vcpu *next);
+extern void (*ctxt_switch_masking)(const struct vcpu *next);
 
 extern u64 host_pat;
 extern bool_t opt_cpu_info;
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

