
[Xen-changelog] [xen stable-4.10] x86: Support fully eager FPU context switching
commit 52447b36f1d59193263e1f2f001eb6bbbde4d285
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Jun 7 17:00:37 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Jun 13 22:01:18 2018 +0100

    x86: Support fully eager FPU context switching
    
    This is controlled on a per-vcpu basis for flexibility.
    
    This is part of XSA-267 / CVE-2018-3665
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 146dfe9277c2b4a8c399b229e00d819065e3167b)
---
 xen/arch/x86/i387.c          | 16 +++++++++++++---
 xen/include/asm-x86/domain.h |  3 +++
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 88452522ad..50116d576f 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -210,7 +210,7 @@ void vcpu_restore_fpu_eager(struct vcpu *v)
     ASSERT(!is_idle_vcpu(v));
     
     /* Restore nonlazy extended state (i.e. parts not tracked by CR0.TS). */
-    if ( !v->arch.nonlazy_xstate_used )
+    if ( !v->arch.fully_eager_fpu && !v->arch.nonlazy_xstate_used )
         return;
 
     /* Avoid recursion */
@@ -221,11 +221,19 @@ void vcpu_restore_fpu_eager(struct vcpu *v)
      * above) we also need to restore full state, to prevent subsequently
      * saving state belonging to another vCPU.
      */
-    if ( xstate_all(v) )
+    if ( v->arch.fully_eager_fpu || (v->arch.xsave_area && xstate_all(v)) )
     {
-        fpu_xrstor(v, XSTATE_ALL);
+        if ( cpu_has_xsave )
+            fpu_xrstor(v, XSTATE_ALL);
+        else
+            fpu_fxrstor(v);
+
         v->fpu_initialised = 1;
         v->fpu_dirtied = 1;
+
+        /* Xen doesn't need TS set, but the guest might. */
+        if ( is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS) )
+            stts();
     }
     else
     {
@@ -247,6 +255,8 @@ void vcpu_restore_fpu_lazy(struct vcpu *v)
     if ( v->fpu_dirtied )
         return;
 
+    ASSERT(!v->arch.fully_eager_fpu);
+
     if ( cpu_has_xsave )
         fpu_xrstor(v, XSTATE_LAZY);
     else
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 2ba21e1135..775933269e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -569,6 +569,9 @@ struct arch_vcpu
      * and thus should be saved/restored. */
     bool_t nonlazy_xstate_used;
 
+    /* Restore all FPU state (lazy and non-lazy state) on context switch? */
+    bool fully_eager_fpu;
+
     /*
      * The SMAP check policy when updating runstate_guest(v) and the
      * secondary system time.
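
For reference, here is a minimal standalone sketch of the restore decision
introduced by the hunks above.  It is plain C with stand-in boolean fields:
fully_eager_fpu and nonlazy_xstate_used mirror the per-vCPU fields touched by
the diff, while has_xsave_area, xstate_all and the enum/helper names are
illustrative stand-ins only, not Xen APIs.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for the fields of struct vcpu / struct arch_vcpu consulted by
 * the patched vcpu_restore_fpu_eager().  has_xsave_area and xstate_all
 * stand in for the v->arch.xsave_area pointer and the xstate_all(v)
 * predicate respectively.
 */
struct vcpu {
    bool fully_eager_fpu;      /* new per-vCPU flag added by this patch */
    bool nonlazy_xstate_used;  /* pre-existing non-lazy xstate tracking */
    bool has_xsave_area;       /* stand-in for v->arch.xsave_area != NULL */
    bool xstate_all;           /* stand-in for xstate_all(v) */
};

enum fpu_restore_action {
    FPU_RESTORE_NONE,          /* early return: nothing to reload here */
    FPU_RESTORE_FULL,          /* fpu_xrstor(v, XSTATE_ALL) or fpu_fxrstor(v) */
    FPU_RESTORE_NONLAZY,       /* only the non-lazy extended state */
};

/* Mirrors the control flow of vcpu_restore_fpu_eager() after the patch. */
static enum fpu_restore_action eager_restore_action(const struct vcpu *v)
{
    /* Nothing to do unless the vCPU is fully eager or uses non-lazy state. */
    if ( !v->fully_eager_fpu && !v->nonlazy_xstate_used )
        return FPU_RESTORE_NONE;

    /* Fully eager vCPUs, or ones tracking all xstate, reload everything. */
    if ( v->fully_eager_fpu || (v->has_xsave_area && v->xstate_all) )
        return FPU_RESTORE_FULL;

    /* Otherwise only the non-lazy extended state is reloaded. */
    return FPU_RESTORE_NONLAZY;
}

int main(void)
{
    struct vcpu eager = { .fully_eager_fpu = true };
    struct vcpu lazy = { 0 };

    printf("fully eager vCPU -> %d (expect %d, full restore)\n",
           eager_restore_action(&eager), FPU_RESTORE_FULL);
    printf("plain lazy vCPU  -> %d (expect %d, no eager restore)\n",
           eager_restore_action(&lazy), FPU_RESTORE_NONE);
    return 0;
}

Note that nothing in this patch sets fully_eager_fpu; it defaults to false, so
behaviour is unchanged until a later change opts a vCPU into fully eager
switching.
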
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.10
