
[Xen-changelog] [xen stable-4.7] x86: avoid leaking PKRU and BND* between vCPU-s



commit d6ce30d2fae66903033c629dac5206c500c60a6c
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Jun 20 16:16:24 2017 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Jun 20 16:16:24 2017 +0200

    x86: avoid leaking PKRU and BND* between vCPU-s
    
    PKRU is explicitly "XSAVE-managed but not XSAVE-enabled", so guests
    may access the register (via {RD,WR}PKRU) without setting XCR0.PKRU.
    Force the register to be context switched and migrated as soon as
    CR4.PKE is set for the first time.
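    
    Illustrative only, not part of the patch: with CR4.PKE set, a guest
    can reach PKRU through the two dedicated instructions even while
    XCR0.PKRU is clear. A minimal guest-side sketch using the raw
    opcodes (RDPKRU = 0f 01 ee, WRPKRU = 0f 01 ef), since older
    assemblers lack the mnemonics:
    
        static inline unsigned int rdpkru(void)
        {
            unsigned int pkru;
    
            /* RDPKRU: ECX must be 0; result in EAX, EDX is cleared. */
            asm volatile ( ".byte 0x0f,0x01,0xee"
                           : "=a" (pkru) : "c" (0) : "edx" );
    
            return pkru;
        }
    
        static inline void wrpkru(unsigned int pkru)
        {
            /* WRPKRU: value in EAX; ECX and EDX must be 0. */
            asm volatile ( ".byte 0x0f,0x01,0xef"
                           :: "a" (pkru), "c" (0), "d" (0) );
        }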
    
    For MPX (BND<n>, BNDCFGU, and BNDSTATUS) the situation is less clear,
    and the SDM does not provide entirely consistent information for that
    case. While experimentally the instructions don't change register
    state as long as the two XCR0 bits aren't both 1, be on the safe side
    and enable both if BNDCFGS.EN is set for the first time.
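    
    For reference, the XCR0 bits involved (architectural values per the
    SDM; the names are the ones the patch uses, as defined in
    xen/include/asm-x86/xstate.h):
    
        #define XSTATE_BNDREGS (1ULL << 3)   /* MPX BND0-BND3 */
        #define XSTATE_BNDCSR  (1ULL << 4)   /* MPX BNDCFGU/BNDSTATUS */
        #define XSTATE_PKRU    (1ULL << 9)   /* protection key rights */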
    
    This is XSA-220.
    
    Reported-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: de20bb6c4f65c4161e0931402613f9ffac86302d
    master date: 2017-06-20 14:36:51 +0200
---
 xen/arch/x86/hvm/hvm.c     | 21 +++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c | 52 ++++++++++++++++++++++++++++++++++++++--------
 2 files changed, 64 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 656430e..db48fd4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2452,6 +2452,27 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
             paging_update_paging_modes(v);
     }
 
+    /*
+     * {RD,WR}PKRU are not gated on XCR0.PKRU and hence an oddly behaving
+     * guest may enable the feature in CR4 without enabling it in XCR0. We
+     * need to context switch / migrate PKRU nevertheless.
+     */
+    if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & XSTATE_PKRU) )
+    {
+        int rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
+                               get_xcr0() | XSTATE_PKRU);
+
+        if ( rc )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.PKRU: %d", rc);
+            goto gpf;
+        }
+
+        if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
+                           get_xcr0() & ~XSTATE_PKRU) )
+            /* nothing, best effort only */;
+    }
+
     return X86EMUL_OKAY;
 
  gpf:
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9d958e8..3fe5083 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -31,6 +31,7 @@
 #include <asm/regs.h>
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
+#include <asm/xstate.h>
 #include <asm/guest_access.h>
 #include <asm/debugreg.h>
 #include <asm/msr.h>
@@ -783,6 +784,45 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
     return 0;
 }
 
+static bool_t vmx_set_guest_bndcfgs(struct vcpu *v, u64 val)
+{
+    if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
+         !is_canonical_address(val) ||
+         (val & IA32_BNDCFGS_RESERVED) )
+        return 0;
+
+    /*
+     * While MPX instructions are supposed to be gated on XCR0.BND*, let's
+     * nevertheless force the relevant XCR0 bits on when the feature is being
+     * enabled in BNDCFGS.
+     */
+    if ( (val & IA32_BNDCFGS_ENABLE) &&
+         !(v->arch.xcr0_accum & (XSTATE_BNDREGS | XSTATE_BNDCSR)) )
+    {
+        uint64_t xcr0 = get_xcr0();
+        int rc;
+
+        if ( v != current )
+            return 0;
+
+        rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
+                           xcr0 | XSTATE_BNDREGS | XSTATE_BNDCSR);
+
+        if ( rc )
+        {
+            HVM_DBG_LOG(DBG_LEVEL_1, "Failed to force XCR0.BND*: %d", rc);
+            return 0;
+        }
+
+        if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0) )
+            /* nothing, best effort only */;
+    }
+
+    __vmwrite(GUEST_BNDCFGS, val);
+
+    return 1;
+}
+
 static unsigned int __init vmx_init_msr(void)
 {
     return (cpu_has_mpx && cpu_has_vmx_mpx) +
@@ -822,11 +862,8 @@ static int vmx_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
         switch ( ctxt->msr[i].index )
         {
         case MSR_IA32_BNDCFGS:
-            if ( cpu_has_mpx && cpu_has_vmx_mpx &&
-                 is_canonical_address(ctxt->msr[i].val) &&
-                 !(ctxt->msr[i].val & IA32_BNDCFGS_RESERVED) )
-                __vmwrite(GUEST_BNDCFGS, ctxt->msr[i].val);
-            else if ( ctxt->msr[i].val )
+            if ( !vmx_set_guest_bndcfgs(v, ctxt->msr[i].val) &&
+                 ctxt->msr[i].val )
                 err = -ENXIO;
             break;
         case MSR_IA32_XSS:
@@ -2878,11 +2915,8 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         break;
     }
     case MSR_IA32_BNDCFGS:
-        if ( !cpu_has_mpx || !cpu_has_vmx_mpx ||
-             !is_canonical_address(msr_content) ||
-             (msr_content & IA32_BNDCFGS_RESERVED) )
+        if ( !vmx_set_guest_bndcfgs(v, msr_content) )
             goto gp_fault;
-        __vmwrite(GUEST_BNDCFGS, msr_content);
         break;
     case IA32_FEATURE_CONTROL_MSR:
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_TRUE_ENTRY_CTLS:
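
For readers following the hunks above: the transient XSETBV sequence works
because Xen's handle_xsetbv() latches every feature bit ever enabled into
v->arch.xcr0_accum, which the context switch and save/restore paths consult.
A simplified, hypothetical reduction of that bookkeeping (the authoritative
logic lives in xen/arch/x86/xstate.c):

    /* Sketch only: what makes the set-then-restore dance sufficient. */
    static void xcr0_bookkeeping_sketch(struct vcpu *v, uint64_t new_bv)
    {
        v->arch.xcr0 = new_bv;          /* the guest's current XCR0 */
        v->arch.xcr0_accum |= new_bv;   /* sticky: bits never clear, so
                                         * PKRU/BND* state is saved and
                                         * restored from now on */
    }

Hence forcing the bit on and immediately restoring the old XCR0 ("best
effort only") is enough to get the state context switched thereafter.
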
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.7
