
[Xen-devel] [PATCH v2] x86/msr: add Raw and Host domain policies



The Raw policy contains the actual values read from hardware MSRs. Add the
PLATFORM_INFO MSR to this policy during probe_cpuid_faulting().

The Host policy may have certain features disabled if Xen decides not
to use them. For now, make the Host policy equal to the Raw policy, with
cpuid_faulting availability dependent on X86_FEATURE_CPUID_FAULTING.

Finally, derive the HVM and PV max domain policies from the Host policy.

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
v2:
- Moved *dp into a narrower scope in probe_cpuid_faulting()
- Changes to how Host/pv/hvm domain policies are calculated
---
 xen/arch/x86/cpu/common.c | 12 +++++++++++-
 xen/arch/x86/msr.c        | 37 ++++++++++++++++++++++++++++---------
 xen/include/asm-x86/msr.h |  8 ++++++++
 3 files changed, 47 insertions(+), 10 deletions(-)
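
For reviewers skimming the diff, the derivation chain this patch introduces
(Raw -> Host -> HVM/PV max) can be illustrated with a minimal standalone
sketch. The struct and field names below are simplified stand-ins for
illustration only, not the actual Xen definitions; the real code is in
xen/arch/x86/msr.c.

/*
 * Sketch of the policy derivation chain:
 *   Raw (hardware values) -> Host (what Xen uses) -> HVM/PV max (guest ceiling).
 * Names are hypothetical stand-ins, not the real Xen structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct example_msr_policy {
    struct {
        bool available;        /* is MSR_INTEL_PLATFORM_INFO readable? */
        bool cpuid_faulting;   /* is the CPUID faulting capability bit set? */
    } platform_info;
};

static struct example_msr_policy raw, host, hvm_max, pv_max;

/* Raw: exactly what the hardware reports (hard-coded here for the demo). */
static void calculate_raw(bool msr_present, bool faulting_bit)
{
    raw.platform_info.available = msr_present;
    raw.platform_info.cpuid_faulting = msr_present && faulting_bit;
}

/* Host: start from Raw, then mask out anything Xen chooses not to use. */
static void calculate_host(bool xen_uses_cpuid_faulting)
{
    host = raw;
    host.platform_info.cpuid_faulting &= xen_uses_cpuid_faulting;
}

/* Guest maximums: start from Host; HVM can additionally emulate faulting. */
static void calculate_guest_max(void)
{
    hvm_max = host;
    hvm_max.platform_info.available = true;
    hvm_max.platform_info.cpuid_faulting = true;   /* always emulatable */

    pv_max = host;                                 /* PV needs real H/W support */
}

int main(void)
{
    calculate_raw(true, true);
    calculate_host(true);
    calculate_guest_max();

    printf("pv_max cpuid_faulting: %d\n", pv_max.platform_info.cpuid_faulting);
    return 0;
}
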

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 4306e59650..0a452aea2c 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -119,8 +119,18 @@ void (* __read_mostly ctxt_switch_masking)(const struct vcpu *next);
 bool __init probe_cpuid_faulting(void)
 {
        uint64_t val;
+       int rc;
 
-       if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val) ||
+       if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
+       {
+               struct msr_domain_policy *dp = &raw_msr_domain_policy;
+
+               dp->plaform_info.available = true;
+               if (val & MSR_PLATFORM_INFO_CPUID_FAULTING)
+                       dp->plaform_info.cpuid_faulting = true;
+       }
+
+       if (rc ||
            !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
            rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
                       this_cpu(msr_misc_features)))
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 7875d9c1e0..7aaa2b0406 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -24,12 +24,31 @@
 #include <xen/sched.h>
 #include <asm/msr.h>
 
-struct msr_domain_policy __read_mostly hvm_max_msr_domain_policy,
+struct msr_domain_policy __read_mostly     raw_msr_domain_policy,
+                         __read_mostly    host_msr_domain_policy,
+                         __read_mostly hvm_max_msr_domain_policy,
                          __read_mostly  pv_max_msr_domain_policy;
 
 struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
                        __read_mostly  pv_max_msr_vcpu_policy;
 
+static void __init calculate_raw_policy(void)
+{
+    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /* Was already added by probe_cpuid_faulting() */
+}
+
+static void __init calculate_host_policy(void)
+{
+    struct msr_domain_policy *dp = &host_msr_domain_policy;
+
+    *dp = raw_msr_domain_policy;
+
+    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /* probe_cpuid_faulting() sanity checks presence of MISC_FEATURES_ENABLES */
+    dp->plaform_info.cpuid_faulting = cpu_has_cpuid_faulting;
+}
+
 static void __init calculate_hvm_max_policy(void)
 {
     struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
@@ -38,7 +57,10 @@ static void __init calculate_hvm_max_policy(void)
     if ( !hvm_enabled )
         return;
 
+    *dp = host_msr_domain_policy;
+
     /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /* It's always possible to emulate CPUID faulting for HVM guests */
     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
          boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
     {
@@ -47,7 +69,7 @@ static void __init calculate_hvm_max_policy(void)
     }
 
     /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
-    vp->misc_features_enables.available = dp->plaform_info.available;
+    vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
 }
 
 static void __init calculate_pv_max_policy(void)
@@ -55,19 +77,16 @@ static void __init calculate_pv_max_policy(void)
     struct msr_domain_policy *dp = &pv_max_msr_domain_policy;
     struct msr_vcpu_policy *vp = &pv_max_msr_vcpu_policy;
 
-    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
-    if ( cpu_has_cpuid_faulting )
-    {
-        dp->plaform_info.available = true;
-        dp->plaform_info.cpuid_faulting = true;
-    }
+    *dp = host_msr_domain_policy;
 
     /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
-    vp->misc_features_enables.available = dp->plaform_info.available;
+    vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
 }
 
 void __init init_guest_msr_policy(void)
 {
+    calculate_raw_policy();
+    calculate_host_policy();
     calculate_hvm_max_policy();
     calculate_pv_max_policy();
 }
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 928f1cc454..94c142289b 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -220,6 +220,14 @@ struct msr_domain_policy
     } plaform_info;
 };
 
+/* RAW msr domain policy: contains the actual values from H/W MSRs */
+extern struct msr_domain_policy raw_msr_domain_policy;
+/*
+ * HOST msr domain policy: features that Xen actually decided to use,
+ * a subset of RAW policy.
+ */
+extern struct msr_domain_policy host_msr_domain_policy;
+
 /* MSR policy object for per-vCPU MSRs */
 struct msr_vcpu_policy
 {
-- 
2.14.1

