
[Xen-devel] [PATCH 1/6] x86: stop handling MSR_IA32_BNDCFGS save/restore in implementation code



Saving and restoring the value of this MSR is currently handled by
implementation-specific code, despite the MSR being architectural. This
patch moves handling of accesses to the MSR from hvm.c into msr.c, thus
allowing the common MSR save/restore code to handle it.
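
For reference, the common save-side code consumes msrs_to_send[] along
these lines (a simplified sketch of hvm_save_cpu_msrs(); the real
function also distinguishes the possible guest_rdmsr() errors):

    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
    {
        uint64_t val;

        /*
         * A failed read (e.g. because the guest doesn't have the
         * feature) just means the MSR isn't included in the record.
         */
        if ( guest_rdmsr(v, msrs_to_send[i], &val) != X86EMUL_OKAY )
            continue;

        if ( !val )
            continue; /* Skip MSRs at their default value. */

        ctxt->msr[ctxt->count].index = msrs_to_send[i];
        ctxt->msr[ctxt->count++].val = val;
    }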

This patch also changes hvm_get/set_guest_bndcfgs() to check the CPUID
policy for the appropriate feature rather than querying hardware, and
re-works the get/set_guest_bndcfgs() hvm_funcs so that they are no
longer boolean. Uses of u64 are converted to uint64_t along the way.
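
Concretely, the hook signatures change as follows (mirroring the
hvm_function_table changes in the diff below):

    /* Before: the hooks report success/failure themselves */
    bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
    bool (*set_guest_bndcfgs)(struct vcpu *v, u64);

    /* After: the wrappers do all checking, so the hooks cannot fail */
    uint64_t (*get_guest_bndcfgs)(struct vcpu *v);
    void (*set_guest_bndcfgs)(struct vcpu *v, uint64_t);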

NOTE: Because vmx_get/set_guest_bndcfgs() call vmx_vmcs_enter(), the
      struct vcpu pointer passed in cannot be const.
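
      For illustration, the call chain that forces the const to be
      dropped (including from guest_rdmsr()'s vcpu parameter) is:

          guest_rdmsr(v, MSR_IA32_BNDCFGS, &val)  /* msr.c */
            -> hvm_get_guest_bndcfgs(v, &val)     /* hvm.c */
              -> hvm_funcs.get_guest_bndcfgs(v)   /* vmx_get_guest_bndcfgs() */
                -> vmx_vmcs_enter(v)              /* needs non-const vcpu */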

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>
Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c         | 44 +++++++++++++++++++++-------------
 xen/arch/x86/hvm/vmx/vmx.c     | 31 +++++-------------------
 xen/arch/x86/msr.c             | 14 ++++++++++-
 xen/arch/x86/pv/emul-priv-op.c |  2 +-
 xen/include/asm-x86/hvm/hvm.h  | 13 ++++------
 xen/include/asm-x86/msr.h      |  2 +-
 6 files changed, 52 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 401c4a9312..5fd5478b7d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -308,11 +308,16 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
     return 1;
 }
 
-bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val)
+bool hvm_set_guest_bndcfgs(struct vcpu *v, uint64_t val)
 {
-    if ( !hvm_funcs.set_guest_bndcfgs ||
-         !is_canonical_address(val) ||
-         (val & IA32_BNDCFGS_RESERVED) )
+    const struct cpuid_policy *cp = v->domain->arch.cpuid;
+
+    if ( !cp->feat.mpx )
+        return false;
+
+    ASSERT(hvm_funcs.set_guest_bndcfgs);
+
+    if ( !is_canonical_address(val) || (val & IA32_BNDCFGS_RESERVED) )
         return false;
 
     /*
@@ -342,7 +347,22 @@ bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val)
             /* nothing, best effort only */;
     }
 
-    return hvm_funcs.set_guest_bndcfgs(v, val);
+    hvm_funcs.set_guest_bndcfgs(v, val);
+
+    return true;
+}
+
+bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val)
+{
+    const struct cpuid_policy *cp = v->domain->arch.cpuid;
+
+    if ( !cp->feat.mpx )
+        return false;
+
+    ASSERT(hvm_funcs.get_guest_bndcfgs);
+    *val = hvm_funcs.get_guest_bndcfgs(v);
+
+    return true;
 }
 
 /*
@@ -1312,6 +1332,7 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 static const uint32_t msrs_to_send[] = {
     MSR_SPEC_CTRL,
     MSR_INTEL_MISC_FEATURES_ENABLES,
+    MSR_IA32_BNDCFGS,
     MSR_AMD64_DR0_ADDRESS_MASK,
     MSR_AMD64_DR1_ADDRESS_MASK,
     MSR_AMD64_DR2_ADDRESS_MASK,
@@ -1449,6 +1470,7 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
 
         case MSR_SPEC_CTRL:
         case MSR_INTEL_MISC_FEATURES_ENABLES:
+        case MSR_IA32_BNDCFGS:
         case MSR_AMD64_DR0_ADDRESS_MASK:
         case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
             rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);
@@ -3472,12 +3494,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         *msr_content = v->arch.hvm.msr_xss;
         break;
 
-    case MSR_IA32_BNDCFGS:
-        if ( !d->arch.cpuid->feat.mpx ||
-             !hvm_get_guest_bndcfgs(v, msr_content) )
-            goto gp_fault;
-        break;
-
     case MSR_K8_ENABLE_C1E:
     case MSR_AMD64_NB_CFG:
          /*
@@ -3624,12 +3640,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
         v->arch.hvm.msr_xss = msr_content;
         break;
 
-    case MSR_IA32_BNDCFGS:
-        if ( !d->arch.cpuid->feat.mpx ||
-             !hvm_set_guest_bndcfgs(v, msr_content) )
-            goto gp_fault;
-        break;
-
     case MSR_AMD64_NB_CFG:
         /* ignore the write */
         break;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 64af8bf943..4bfabe8d0e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -805,17 +805,6 @@ static unsigned int __init vmx_init_msr(void)
 
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
-    vmx_vmcs_enter(v);
-
-    if ( cpu_has_mpx && cpu_has_vmx_mpx )
-    {
-        __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);
-        if ( ctxt->msr[ctxt->count].val )
-            ctxt->msr[ctxt->count++].index = MSR_IA32_BNDCFGS;
-    }
-
-    vmx_vmcs_exit(v);
-
     if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
     {
         ctxt->msr[ctxt->count].val = v->arch.hvm.msr_xss;
@@ -835,14 +824,6 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
     {
         switch ( ctxt->msr[i].index )
         {
-        case MSR_IA32_BNDCFGS:
-            if ( cpu_has_mpx && cpu_has_vmx_mpx &&
-                 is_canonical_address(ctxt->msr[i].val) &&
-                 !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
-                __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
-            else if ( ctxt->msr[i].val )
-                err = -ENXIO;
-            break;
         case MSR_IA32_XSS:
             if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
                 v->arch.hvm.msr_xss = ctxt->msr[i].val;
@@ -1204,26 +1185,26 @@ static int vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
     return 1;
 }
 
-static bool vmx_set_guest_bndcfgs(struct vcpu *v, u64 val)
+static void vmx_set_guest_bndcfgs(struct vcpu *v, uint64_t val)
 {
     ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
 
     vmx_vmcs_enter(v);
     __vmwrite(GUEST_BNDCFGS, val);
     vmx_vmcs_exit(v);
-
-    return true;
 }
 
-static bool vmx_get_guest_bndcfgs(struct vcpu *v, u64 *val)
+static uint64_t vmx_get_guest_bndcfgs(struct vcpu *v)
 {
+    uint64_t val;
+
     ASSERT(cpu_has_mpx && cpu_has_vmx_mpx);
 
     vmx_vmcs_enter(v);
-    __vmread(GUEST_BNDCFGS, val);
+    __vmread(GUEST_BNDCFGS, &val);
     vmx_vmcs_exit(v);
 
-    return true;
+    return val;
 }
 
 static void vmx_handle_cd(struct vcpu *v, unsigned long value)
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 9bb38b6d66..a3406c29a8 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -115,7 +115,7 @@ int init_vcpu_msr_policy(struct vcpu *v)
     return 0;
 }
 
-int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
 {
     const struct vcpu *curr = current;
     const struct domain *d = v->domain;
@@ -158,6 +158,12 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         ret = guest_rdmsr_x2apic(v, msr, val);
         break;
 
+    case MSR_IA32_BNDCFGS:
+        if ( !is_hvm_domain(d) || !hvm_get_guest_bndcfgs(v, val) )
+            goto gp_fault;
+
+        break;
+
     case 0x40000000 ... 0x400001ff:
         if ( is_viridian_domain(d) )
         {
@@ -319,6 +325,12 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
         ret = guest_wrmsr_x2apic(v, msr, val);
         break;
 
+    case MSR_IA32_BNDCFGS:
+        if ( !is_hvm_domain(d) || !hvm_set_guest_bndcfgs(v, val) )
+            goto gp_fault;
+
+        break;
+
     case 0x40000000 ... 0x400001ff:
         if ( is_viridian_domain(d) )
         {
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 942ece2ca0..678dad3792 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -807,7 +807,7 @@ static inline bool is_cpufreq_controller(const struct domain *d)
 static int read_msr(unsigned int reg, uint64_t *val,
                     struct x86_emulate_ctxt *ctxt)
 {
-    const struct vcpu *curr = current;
+    struct vcpu *curr = current;
     const struct domain *currd = curr->domain;
     bool vpmu_msr = false;
     int ret;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0a10b51554..5c8237e087 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -149,8 +149,8 @@ struct hvm_function_table {
     int  (*get_guest_pat)(struct vcpu *v, u64 *);
     int  (*set_guest_pat)(struct vcpu *v, u64);
 
-    bool (*get_guest_bndcfgs)(struct vcpu *v, u64 *);
-    bool (*set_guest_bndcfgs)(struct vcpu *v, u64);
+    uint64_t (*get_guest_bndcfgs)(struct vcpu *v);
+    void (*set_guest_bndcfgs)(struct vcpu *v, uint64_t);
 
     void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);
 
@@ -283,8 +283,6 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
 void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg);
 
-bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
-
 bool hvm_check_cpuid_faulting(struct vcpu *v);
 void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
@@ -446,11 +444,8 @@ static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
     return hvm_funcs.get_shadow_gs_base(v);
 }
 
-static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
-{
-    return hvm_funcs.get_guest_bndcfgs &&
-           hvm_funcs.get_guest_bndcfgs(v, val);
-}
+bool hvm_get_guest_bndcfgs(struct vcpu *v, uint64_t *val);
+bool hvm_set_guest_bndcfgs(struct vcpu *v, uint64_t val);
 
 #define has_hvm_params(d) \
     ((d)->arch.hvm.params != NULL)
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index adfa2fa05b..ad8688a61f 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -320,7 +320,7 @@ int init_vcpu_msr_policy(struct vcpu *v);
  * These functions are also used by the migration logic, so need to cope with
  * being used outside of v's context.
  */
-int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
+int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val);
 int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val);
 
 #endif /* !__ASSEMBLY__ */
-- 
2.20.1

