
[Xen-changelog] [xen stable-4.2] x86: fix XCR0 handling



commit 0bda88abe18029c2bbe9dc5d07cc706bd775c9b7
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Jun 13 11:23:56 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Jun 13 11:23:56 2013 +0200

    x86: fix XCR0 handling
    
    - both VMX and SVM ignored the ECX input to XSETBV
    - both SVM and VMX used the full 64-bit RAX when calculating the input
      mask to XSETBV
    - faults on XSETBV were not recovered from
    
    Also consolidate the handling for PV and HVM into a single function,
    and make the per-CPU variable "xcr0" static to xstate.c.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
    master commit: 10b2b21a241795394637167bd4b076f2de17741f
    master date: 2013-06-04 17:25:41 +0200
---
 xen/arch/x86/domain.c             |    5 ++-
 xen/arch/x86/hvm/hvm.c            |   14 ++--------
 xen/arch/x86/hvm/svm/svm.c        |    3 +-
 xen/arch/x86/hvm/vmx/vmx.c        |    6 +---
 xen/arch/x86/i387.c               |   16 +++++++++---
 xen/arch/x86/traps.c              |   22 ++--------------
 xen/arch/x86/xstate.c             |   49 +++++++++++++++++++++++++++++++------
 xen/include/asm-x86/hvm/support.h |    2 +-
 xen/include/asm-x86/xstate.h      |    7 +----
 9 files changed, 69 insertions(+), 55 deletions(-)
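
The second bullet above is the subtle one: XSETBV takes its 64-bit input
from EDX:EAX, with ECX selecting the extended control register, so on a
64-bit vmexit the upper half of the saved rax must be ignored. A
standalone sketch with hypothetical register values (not Xen code)
showing what the explicit (u32) truncation in the fixed SVM/VMX handlers
guards against:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical vmexit state: the guest executed XSETBV with EAX=7,
     * but earlier 64-bit arithmetic left stale bits in rax's upper half. */
    uint64_t rax = 0xdeadbeef00000007ULL;
    uint64_t rdx = 0;
    uint32_t ecx = 0;                      /* XCR0, the only defined index */

    uint64_t buggy   = (rdx << 32) | rax;            /* stale bits leak in */
    uint64_t correct = (rdx << 32) | (uint32_t)rax;  /* what the fix does  */

    printf("index=%u buggy=%#jx correct=%#jx\n",
           ecx, (uintmax_t)buggy, (uintmax_t)correct);
    return 0;
}

The first bullet is the same operand, other side: ECX was previously not
passed down at all, so a guest writing any XCR index was handled as if it
had written XCR0.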

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 1d5bcb5..830c79d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1610,8 +1610,9 @@ static void __context_switch(void)
     if ( !is_idle_vcpu(n) )
     {
         memcpy(stack_regs, &n->arch.user_regs, CTXT_SWITCH_STACK_BYTES);
-        if ( xsave_enabled(n) && n->arch.xcr0 != get_xcr0() )
-            set_xcr0(n->arch.xcr0);
+        if ( xsave_enabled(n) && n->arch.xcr0 != get_xcr0() &&
+             !set_xcr0(n->arch.xcr0) )
+            BUG();
         vcpu_restore_fpu_eager(n);
         n->arch.ctxt_switch_to(n);
     }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6b82997..be79fb6 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1459,25 +1459,17 @@ out:
     return rc;
 }
 
-int hvm_handle_xsetbv(u64 new_bv)
+int hvm_handle_xsetbv(u32 index, u64 new_bv)
 {
-    struct vcpu *v = current;
     struct segment_register sreg;
 
-    hvm_get_segment_register(v, x86_seg_ss, &sreg);
+    hvm_get_segment_register(current, x86_seg_ss, &sreg);
     if ( sreg.attr.fields.dpl != 0 )
         goto err;
 
-    if ( ((new_bv ^ xfeature_mask) & ~xfeature_mask) || !(new_bv & 1) )
-        goto err;
-
-    if ( (xfeature_mask & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
+    if ( handle_xsetbv(index, new_bv) )
         goto err;
 
-    v->arch.xcr0 = new_bv;
-    v->arch.xcr0_accum |= new_bv;
-    set_xcr0(new_bv);
-
     return 0;
 err:
     hvm_inject_hw_exception(TRAP_gp_fault, 0);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 1b00794..59ac6ab 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2389,7 +2389,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     case VMEXIT_XSETBV:
         if ( (inst_len = __get_instruction_length(current, INSTR_XSETBV))==0 )
             break;
-        if ( hvm_handle_xsetbv((((u64)regs->edx) << 32) | regs->eax) == 0 )
+        if ( hvm_handle_xsetbv(regs->ecx,
+                               ((u64)regs->edx << 32) | (u32)regs->eax) == 0 )
             __update_guest_eip(regs, inst_len);
         break;
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 9ebb2af..a1077b6 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2743,12 +2743,10 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case EXIT_REASON_XSETBV:
-    {
-        u64 new_bv = (((u64)regs->edx) << 32) | regs->eax;
-        if ( hvm_handle_xsetbv(new_bv) == 0 )
+        if ( hvm_handle_xsetbv(regs->ecx,
+                               ((u64)regs->edx << 32) | (u32)regs->eax) == 0 )
             update_guest_eip(); /* Safe: XSETBV */
         break;
-    }
 
     case EXIT_REASON_ACCESS_GDTR_OR_IDTR:
     case EXIT_REASON_ACCESS_LDTR_OR_TR:
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 8f3e723..a055224 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -36,13 +36,17 @@ static void fpu_init(void)
 /* Restore x87 extended state */
 static inline void fpu_xrstor(struct vcpu *v, uint64_t mask)
 {
+    bool_t ok;
+
     /*
      * XCR0 normally represents what guest OS set. In case of Xen itself, 
      * we set all supported feature mask before doing save/restore.
      */
-    set_xcr0(v->arch.xcr0_accum);
+    ok = set_xcr0(v->arch.xcr0_accum);
+    ASSERT(ok);
     xrstor(v, mask);
-    set_xcr0(v->arch.xcr0);
+    ok = set_xcr0(v->arch.xcr0);
+    ASSERT(ok);
 }
 
 /* Restore x87 FPU, MMX, SSE and SSE2 state */
@@ -120,12 +124,16 @@ static inline void fpu_frstor(struct vcpu *v)
 /* Save x87 extended state */
 static inline void fpu_xsave(struct vcpu *v)
 {
+    bool_t ok;
+
     /* XCR0 normally represents what guest OS set. In case of Xen itself,
      * we set all accumulated feature mask before doing save/restore.
      */
-    set_xcr0(v->arch.xcr0_accum);
+    ok = set_xcr0(v->arch.xcr0_accum);
+    ASSERT(ok);
     xsave(v, v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY);
-    set_xcr0(v->arch.xcr0);    
+    ok = set_xcr0(v->arch.xcr0);
+    ASSERT(ok);
 }
 
 /* Save x87 FPU, MMX, SSE and SSE2 state */
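
The ASSERT(ok) pairs here rest on an invariant rather than on a runtime
check: v->arch.xcr0_accum only ever accumulates values that an earlier
set_xcr0() call accepted, so temporarily raising XCR0 to the accumulated
mask around XSAVE/XRSTOR cannot newly fault. A toy model of that
invariant (not Xen code; the feature mask value is assumed):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static const uint64_t xfeature_mask = 0x7;   /* assumed: x87|SSE|YMM */
static uint64_t hw_xcr0 = 1;

/* Stand-in for the real set_xcr0(): refuse what hardware would #GP on. */
static bool set_xcr0(uint64_t v)
{
    if ( (v & ~xfeature_mask) || !(v & 1) )
        return false;
    hw_xcr0 = v;
    return true;
}

int main(void)
{
    uint64_t xcr0 = 1, xcr0_accum = 1;
    const uint64_t guest_writes[] = { 0x3, 0x7, 0x2, 0x1 };
    bool ok;

    for ( unsigned int i = 0;
          i < sizeof(guest_writes) / sizeof(*guest_writes); i++ )
        if ( set_xcr0(guest_writes[i]) )   /* only accepted values ...  */
        {
            xcr0 = guest_writes[i];
            xcr0_accum |= xcr0;            /* ... ever reach the accum  */
        }

    /* The save/restore path: raise XCR0 to the union, then put it back.
     * Every bit of xcr0_accum was individually accepted, so neither call
     * can fail, which is exactly what the ASSERT(ok) above encodes. */
    ok = set_xcr0(xcr0_accum);
    assert(ok);
    ok = set_xcr0(xcr0);
    assert(ok);
    return (int)(hw_xcr0 != xcr0);
}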
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index d824a55..5873358 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2279,25 +2279,9 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( !guest_kernel_mode(v, regs) )
                 goto fail;
 
-            switch ( (u32)regs->ecx )
-            {
-                case XCR_XFEATURE_ENABLED_MASK:
-                    /* bit 0 of XCR0 must be set and reserved bit must not be set */
-                    if ( !(new_xfeature & XSTATE_FP) || (new_xfeature & ~xfeature_mask) )
-                        goto fail;
-
-                    /* YMM state takes SSE state as prerequisite. */
-                    if ( (xfeature_mask & new_xfeature & XSTATE_YMM) &&
-                         !(new_xfeature & XSTATE_SSE) )
-                        goto fail;
-
-                    v->arch.xcr0 = new_xfeature;
-                    v->arch.xcr0_accum |= new_xfeature;
-                    set_xcr0(new_xfeature);
-                    break;
-                default:
-                    goto fail;
-            }
+            if ( handle_xsetbv(regs->ecx, new_xfeature) )
+                goto fail;
+
             break;
         }
         default:
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 341b857..a9c78a0 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -5,7 +5,7 @@
  *
  */
 
-#include <xen/config.h>
+#include <xen/percpu.h>
 #include <xen/sched.h>
 #include <asm/current.h>
 #include <asm/processor.h>
@@ -27,24 +27,34 @@ u32 xsave_cntxt_size;
 u64 xfeature_mask;
 
 /* Cached xcr0 for fast read */
-DEFINE_PER_CPU(uint64_t, xcr0);
+static DEFINE_PER_CPU(uint64_t, xcr0);
 
 /* Because XCR0 is cached for each CPU, xsetbv() is not exposed. Users should 
  * use set_xcr0() instead.
  */
-static inline void xsetbv(u32 index, u64 xfeatures)
+static inline bool_t xsetbv(u32 index, u64 xfeatures)
 {
     u32 hi = xfeatures >> 32;
     u32 lo = (u32)xfeatures;
 
-    asm volatile (".byte 0x0f,0x01,0xd1" :: "c" (index),
-            "a" (lo), "d" (hi));
+    asm volatile ( "1: .byte 0x0f,0x01,0xd1\n"
+                   "3:                     \n"
+                   ".section .fixup,\"ax\" \n"
+                   "2: xor %0,%0           \n"
+                   "   jmp 3b              \n"
+                   ".previous              \n"
+                   _ASM_EXTABLE(1b, 2b)
+                   : "+a" (lo)
+                   : "c" (index), "d" (hi));
+    return lo != 0;
 }
 
-void set_xcr0(u64 xfeatures)
+bool_t set_xcr0(u64 xfeatures)
 {
+    if ( !xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures) )
+        return 0;
     this_cpu(xcr0) = xfeatures;
-    xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures);
+    return 1;
 }
 
 uint64_t get_xcr0(void)
@@ -242,7 +252,8 @@ void xstate_init(void)
      * Set CR4_OSXSAVE and run "cpuid" to get xsave_cntxt_size.
      */
     set_in_cr4(X86_CR4_OSXSAVE);
-    set_xcr0((((u64)edx << 32) | eax) & XCNTXT_MASK);
+    if ( !set_xcr0((((u64)edx << 32) | eax) & XCNTXT_MASK) )
+        BUG();
     cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
 
     if ( cpu == 0 )
@@ -268,6 +279,28 @@ void xstate_init(void)
     }
 }
 
+int handle_xsetbv(u32 index, u64 new_bv)
+{
+    struct vcpu *curr = current;
+
+    if ( index != XCR_XFEATURE_ENABLED_MASK )
+        return -EOPNOTSUPP;
+
+    if ( (new_bv & ~xfeature_mask) || !(new_bv & XSTATE_FP) )
+        return -EINVAL;
+
+    if ( (new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE) )
+        return -EINVAL;
+
+    if ( !set_xcr0(new_bv) )
+        return -EFAULT;
+
+    curr->arch.xcr0 = new_bv;
+    curr->arch.xcr0_accum |= new_bv;
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
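
The .fixup/_ASM_EXTABLE addition to xsetbv() is Xen's standard
exception-table pattern: if the instruction at label 1 faults, execution
resumes at label 2, which zeroes the output operand and jumps past the
instruction, so the "lo != 0" test distinguishes success from a faulted
write (any valid XCR0 value has bit 0 set, so lo is non-zero when the
instruction completes). For intuition only, here is a rough user-space
analogue (a hypothetical demo, not the hypervisor mechanism) that
recovers from the #GP an unprivileged XSETBV raises, which Linux
delivers as SIGSEGV:

#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

static sigjmp_buf fault_buf;

static void fault_handler(int sig)
{
    (void)sig;
    siglongjmp(fault_buf, 1);          /* the "fixup": skip the insn */
}

/* Returns 1 on success, 0 if the instruction faulted. */
static int try_xsetbv(uint32_t index, uint64_t val)
{
    if ( sigsetjmp(fault_buf, 1) )
        return 0;                      /* we land here after a fault */
    asm volatile ( ".byte 0x0f,0x01,0xd1"        /* xsetbv */
                   :: "c" (index),
                      "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
    return 1;
}

int main(void)
{
    /* XSETBV requires CPL 0, so from user space it always raises #GP. */
    signal(SIGSEGV, fault_handler);
    printf("xsetbv: %s\n", try_xsetbv(0, 3) ? "ok" : "faulted, recovered");
    return 0;
}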
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index f9b102f..7ddc806 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -128,7 +128,7 @@ void hvm_triple_fault(void);
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
 
-int hvm_handle_xsetbv(u64 new_bv);
+int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv);
 
 /* These functions all return X86EMUL return codes. */
 int hvm_set_efer(uint64_t value);
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index c1621cf..3c6f91d 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -9,7 +9,6 @@
 #define __ASM_XSTATE_H
 
 #include <xen/types.h>
-#include <xen/percpu.h>
 
 #define FCW_DEFAULT               0x037f
 #define MXCSR_DEFAULT             0x1f80
@@ -34,9 +33,6 @@
 #define XSTATE_NONLAZY (XSTATE_LWP)
 #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)
 
-/* extended state variables */
-DECLARE_PER_CPU(uint64_t, xcr0);
-
 extern unsigned int xsave_cntxt_size;
 extern u64 xfeature_mask;
 
@@ -77,11 +73,12 @@ struct xsave_struct
 } __attribute__ ((packed, aligned (64)));
 
 /* extended state operations */
-void set_xcr0(u64 xfeatures);
+bool_t __must_check set_xcr0(u64 xfeatures);
 uint64_t get_xcr0(void);
 void xsave(struct vcpu *v, uint64_t mask);
 void xrstor(struct vcpu *v, uint64_t mask);
 bool_t xsave_enabled(const struct vcpu *v);
+int __must_check handle_xsetbv(u32 index, u64 new_bv);
 
 /* extended state init and cleanup functions */
 void xstate_free_save_area(struct vcpu *v);
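
The __must_check annotations turn the new failure paths into
compiler-enforced contracts: a caller that drops the result of
set_xcr0() or handle_xsetbv() now draws a warning, which is why the
domain.c and xstate.c hunks above wrap their calls in explicit BUG()
checks. Xen defines __must_check in terms of gcc's warn_unused_result
attribute; a standalone illustration (try_op() is a made-up
placeholder):

#include <stdbool.h>

#define __must_check __attribute__((warn_unused_result))

static __must_check bool try_op(void)
{
    return false;                 /* pretend the operation failed */
}

int main(void)
{
    try_op();                     /* warning: ignoring return value */
    return try_op() ? 0 : 1;      /* checked use compiles cleanly   */
}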
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2
