[Xen-devel] [PATCH v8 10/17] x86/hvm: Permit guests direct access to MSR_{SPEC_CTRL, PRED_CMD}
For performance reasons, HVM guests should have direct access to these MSRs
when possible.
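
For context: the cost being avoided is a vmexit per intercepted MSR
access.  A guest kernel using IBRS typically writes MSR_SPEC_CTRL on
every kernel entry/exit and issues an IBPB on context switch, so
intercepting these MSRs is expensive.  A rough guest-side sketch of that
access pattern (hypothetical hook names, not code from this series):

    #include <stdint.h>

    #define MSR_SPEC_CTRL   0x00000048
    #define MSR_PRED_CMD    0x00000049
    #define SPEC_CTRL_IBRS  (1u << 0)
    #define PRED_CMD_IBPB   (1u << 0)

    /* Plain wrmsr: traps to the hypervisor whenever the MSR is intercepted. */
    static inline void wrmsr(uint32_t msr, uint64_t val)
    {
        asm volatile ( "wrmsr" :: "c" (msr), "a" ((uint32_t)val),
                                  "d" ((uint32_t)(val >> 32)) );
    }

    /* Hypothetical guest kernel hooks, for illustration only. */
    void on_kernel_entry(void)   { wrmsr(MSR_SPEC_CTRL, SPEC_CTRL_IBRS); }
    void on_return_to_user(void) { wrmsr(MSR_SPEC_CTRL, 0); }
    void on_context_switch(void) { wrmsr(MSR_PRED_CMD, PRED_CMD_IBPB); }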

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v7:
 * Drop excess brackets
---
 xen/arch/x86/domctl.c      | 19 +++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c |  5 +++++
 xen/arch/x86/hvm/vmx/vmx.c | 18 ++++++++++++++++++
 xen/arch/x86/msr.c         |  3 ++-
 xen/include/asm-x86/msr.h  |  5 ++++-
 5 files changed, 48 insertions(+), 2 deletions(-)
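
As background for the vmx_{set,clear}_msr_intercept() calls in the vmx.c
hunk below, here is a simplified model of the 4KiB VMX MSR bitmap (layout
per the Intel SDM; Xen's real helpers differ in detail).  A clear bit
means the corresponding access no longer vmexits:

    #include <stdbool.h>
    #include <stdint.h>

    /* One 4KiB VMX MSR bitmap page, split into four 1KiB regions. */
    struct msr_bitmap {
        uint8_t read_low[1024];    /* reads of  MSRs 0x00000000-0x00001fff */
        uint8_t read_high[1024];   /* reads of  MSRs 0xc0000000-0xc0001fff */
        uint8_t write_low[1024];   /* writes of MSRs 0x00000000-0x00001fff */
        uint8_t write_high[1024];  /* writes of MSRs 0xc0000000-0xc0001fff */
    };

    /* Clear the write-intercept bit so a guest WRMSR no longer vmexits. */
    static bool clear_write_intercept(struct msr_bitmap *bm, uint32_t msr)
    {
        uint8_t *map;

        if ( msr <= 0x1fff )
            map = bm->write_low;
        else if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        {
            map = bm->write_high;
            msr &= 0x1fff;
        }
        else
            return false;   /* Outside the ranges the bitmap can cover. */

        map[msr / 8] &= ~(1u << (msr % 8));
        return true;
    }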

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 72b4489..e5fdde7 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -53,6 +53,7 @@ static int update_domain_cpuid_info(struct domain *d,
     struct cpuid_policy *p = d->arch.cpuid;
     const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
     int old_vendor = p->x86_vendor;
+    unsigned int old_7d0 = p->feat.raw[0].d, old_e8b = p->extd.raw[8].b;
     bool call_policy_changed = false; /* Avoid for_each_vcpu() unnecessarily */
 
     /*
@@ -218,6 +219,14 @@ static int update_domain_cpuid_info(struct domain *d,
 
             d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
         }
+
+        /*
+         * If the IBRSB/STIBP policy has changed, we need to recalculate the
+         * MSR interception bitmaps and STIBP protection default.
+         */
+        call_policy_changed = ((old_7d0 ^ p->feat.raw[0].d) &
+                               (cpufeat_mask(X86_FEATURE_IBRSB) |
+                                cpufeat_mask(X86_FEATURE_STIBP)));
         break;
 
     case 0xa:
@@ -292,6 +301,16 @@ static int update_domain_cpuid_info(struct domain *d,
             d->arch.pv_domain.cpuidmasks->e1cd = mask;
         }
         break;
+
+    case 0x80000008:
+        /*
+         * If the IBPB policy has changed, we need to recalculate the MSR
+         * interception bitmaps.
+         */
+        call_policy_changed = (is_hvm_domain(d) &&
+                               ((old_e8b ^ p->extd.raw[8].b) &
+                                cpufeat_mask(X86_FEATURE_IBPB)));
+        break;
     }
 
     if ( call_policy_changed )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c48fdfa..ee47508 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -617,6 +617,7 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
     struct vmcb_struct *vmcb = arch_svm->vmcb;
+    const struct cpuid_policy *cp = v->domain->arch.cpuid;
     u32 bitmap = vmcb_get_exception_intercepts(vmcb);
 
     if ( opt_hvm_fep ||
@@ -626,6 +627,10 @@ static void svm_cpuid_policy_changed(struct vcpu *v)
         bitmap &= ~(1U << TRAP_invalid_op);
 
     vmcb_set_exception_intercepts(vmcb, bitmap);
+
+    /* Give access to MSR_PRED_CMD if the guest has been told about it. */
+    svm_intercept_msr(v, MSR_PRED_CMD,
+                      cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
 }
 
 static void svm_sync_vmcb(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e036303..8609de3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -656,6 +656,8 @@ void vmx_update_exception_bitmap(struct vcpu *v)
 
 static void vmx_cpuid_policy_changed(struct vcpu *v)
 {
+    const struct cpuid_policy *cp = v->domain->arch.cpuid;
+
     if ( opt_hvm_fep ||
          (v->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor) )
         v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
@@ -665,6 +667,22 @@ static void vmx_cpuid_policy_changed(struct vcpu *v)
     vmx_vmcs_enter(v);
     vmx_update_exception_bitmap(v);
     vmx_vmcs_exit(v);
+
+    /*
+     * We can only pass through MSR_SPEC_CTRL if the guest knows about all bits
+     * in it.  Otherwise, Xen may be fixing up in the background.
+     */
+    v->arch.msr->spec_ctrl.direct_access = cp->feat.ibrsb && cp->feat.stibp;
+    if ( v->arch.msr->spec_ctrl.direct_access )
+        vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+    else
+        vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+
+    /* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
+    if ( cp->feat.ibrsb || cp->extd.ibpb )
+        vmx_clear_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
+    else
+        vmx_set_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
 }
 
 int vmx_guest_x86_mode(struct vcpu *v)
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 02a7b49..697cc6e 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -132,7 +132,8 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
     case MSR_SPEC_CTRL:
         if ( !cp->feat.ibrsb )
             goto gp_fault;
-        *val = vp->spec_ctrl.guest;
+        *val = (vp->spec_ctrl.direct_access
+                ? vp->spec_ctrl.host : vp->spec_ctrl.guest);
         break;
 
     case MSR_INTEL_PLATFORM_INFO:
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 3d0012d..007e966 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -229,10 +229,13 @@ struct msr_vcpu_policy
          * Only the bottom two bits are defined, so no need to waste space
         * with uint64_t at the moment.  We maintain the guest's idea of the
          * value it wrote, and a value to install into hardware (extended to
-         * uint32_t to simplify the asm) which might be different.
+         * uint32_t to simplify the asm) which might be different.  HVM guests
+         * might be given direct access to the MSRs, at which point the
+         * 'guest' value becomes stale.
          */
         uint32_t host;
         uint8_t guest;
+        bool direct_access;
     } spec_ctrl;
 
     /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
-- 
2.1.4
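
A subtlety worth spelling out about the msr.c hunk above: once
MSR_SPEC_CTRL is passed through, guest writes go straight to hardware and
the software-maintained 'guest' copy stops being updated, which is why
guest_rdmsr() switches to reporting the value destined for hardware.  In
simplified form (a sketch of the logic, not the actual Xen code paths):

    #include <stdbool.h>
    #include <stdint.h>

    struct spec_ctrl_state {
        uint32_t host;          /* Value to install into hardware. */
        uint8_t  guest;         /* Last value seen via an intercepted WRMSR. */
        bool     direct_access; /* MSR currently passed through to the guest? */
    };

    /* Model of the MSR_SPEC_CTRL read path after this patch. */
    static uint64_t read_spec_ctrl(const struct spec_ctrl_state *s)
    {
        /*
         * Under direct access the 'guest' copy may be stale, so any read
         * serviced in software reports the value headed for hardware.
         */
        return s->direct_access ? s->host : s->guest;
    }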

