
[Xen-changelog] [xen master] x86/xsave: fix nonlazy state handling



commit 7d8b5dd98463524686bdee8b973b53c00c232122
Author:     Liu Jinsong <jinsong.liu@xxxxxxxxx>
AuthorDate: Mon Nov 25 11:19:04 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Nov 25 11:19:04 2013 +0100

    x86/xsave: fix nonlazy state handling
    
    Nonlazy xstates should be xsaved each time vcpu_save_fpu runs.
    Operations on nonlazy xstates do not trigger a #NM exception, so
    such state must be restored whenever a vcpu is scheduled in and
    saved whenever it is scheduled out.
    
    Currently this bug affects the AMD LWP feature, and it would later
    affect the Intel MPX feature as well. With the fix, both LWP and
    MPX work correctly.
    
    Signed-off-by: Liu Jinsong <jinsong.liu@xxxxxxxxx>
    
    Furthermore, during restore we also need to set nonlazy_xstate_used
    according to the incoming accumulated XCR0.
    
    Also adjust the changes to i387.c such that there won't be a pointless
    clts()/stts() pair.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/domctl.c  |    2 ++
 xen/arch/x86/hvm/hvm.c |    2 ++
 xen/arch/x86/i387.c    |   15 +++++++++++++--
 3 files changed, 17 insertions(+), 2 deletions(-)
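
For context, "nonlazy" refers to XSAVE state components whose use raises
no #NM, so the lazy restore-on-first-use scheme never sees accesses to
them. Below is a minimal sketch of the mask split, modelled on the
XSTATE_* definitions in xen/include/asm-x86/xstate.h around this time;
the bit positions are architectural, but take the sketch as illustrative
rather than as the exact Xen header:

    /* XSAVE component bits (architectural; Intel SDM / AMD APM). */
    #define XSTATE_FP      (1ULL << 0)   /* x87 */
    #define XSTATE_SSE     (1ULL << 1)   /* SSE/XMM */
    #define XSTATE_YMM     (1ULL << 2)   /* AVX */
    #define XSTATE_BNDREGS (1ULL << 3)   /* MPX bound registers */
    #define XSTATE_BNDCSR  (1ULL << 4)   /* MPX bound config/status */
    #define XSTATE_LWP     (1ULL << 62)  /* AMD Lightweight Profiling */

    #define XSTATE_FP_SSE  (XSTATE_FP | XSTATE_SSE)
    #define XSTATE_ALL     (~(1ULL << 63))

    /* Components whose use raises no #NM, invisible to lazy switching: */
    #define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR)
    #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)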

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f7e4586..8644218 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1146,6 +1146,8 @@ long arch_do_domctl(
             {
                 v->arch.xcr0 = _xcr0;
                 v->arch.xcr0_accum = _xcr0_accum;
+                if ( _xcr0_accum & XSTATE_NONLAZY )
+                    v->arch.nonlazy_xstate_used = 1;
                 memcpy(v->arch.xsave_area, _xsave_area,
                        evc->size - 2 * sizeof(uint64_t));
             }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b76f041..0094c62 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1119,6 +1119,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 
     v->arch.xcr0 = ctxt->xcr0;
     v->arch.xcr0_accum = ctxt->xcr0_accum;
+    if ( ctxt->xcr0_accum & XSTATE_NONLAZY )
+        v->arch.nonlazy_xstate_used = 1;
     memcpy(v->arch.xsave_area, &ctxt->save_area,
            desc->length - offsetof(struct hvm_hw_cpu_xsave, save_area));
 
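Both restore paths (domctl above and the HVM context load here) apply the
same rule: any nonlazy bit ever enabled in the incoming accumulated XCR0
means the image may contain LWP/MPX state, so the vcpu must be flagged for
unconditional saving. A standalone model of the check, reusing the
illustrative XSTATE_NONLAZY mask from the sketch after the diffstat (not
the actual Xen code):

    /* Model: should a restored vcpu be flagged nonlazy_xstate_used?
     * XSTATE_NONLAZY as in the sketch after the diffstat above. */
    static int uses_nonlazy_state(uint64_t xcr0_accum)
    {
        return (xcr0_accum & XSTATE_NONLAZY) != 0;
    }

Without this flag, a migrated-in guest that had touched LWP or MPX would
have that state silently dropped on its first deschedule.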
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 7649274..bd72138 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -133,11 +133,22 @@ static inline void fpu_frstor(struct vcpu *v)
 /*******************************/
 /*      FPU Save Functions     */
 /*******************************/
+
+static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
+{
+    if ( v->fpu_dirtied )
+        return v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;
+
+    return v->arch.nonlazy_xstate_used ? XSTATE_NONLAZY : 0;
+}
+
 /* Save x87 extended state */
 static inline void fpu_xsave(struct vcpu *v)
 {
     bool_t ok;
+    uint64_t mask = vcpu_xsave_mask(v);
 
+    ASSERT(mask);
     ASSERT(v->arch.xsave_area);
     /*
      * XCR0 normally represents what guest OS set. In case of Xen itself,
@@ -145,7 +156,7 @@ static inline void fpu_xsave(struct vcpu *v)
      */
     ok = set_xcr0(v->arch.xcr0_accum | XSTATE_FP_SSE);
     ASSERT(ok);
-    xsave(v, v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY);
+    xsave(v, mask);
     ok = set_xcr0(v->arch.xcr0 ?: XSTATE_FP_SSE);
     ASSERT(ok);
 }
@@ -257,7 +268,7 @@ void vcpu_restore_fpu_lazy(struct vcpu *v)
  */
 void vcpu_save_fpu(struct vcpu *v)
 {
-    if ( !v->fpu_dirtied )
+    if ( !v->fpu_dirtied && !v->arch.nonlazy_xstate_used )
         return;
 
     ASSERT(!is_idle_vcpu(v));
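
Taken together, vcpu_xsave_mask() yields four cases for what a deschedule
must save. Below is a compilable model of that decision, again using the
illustrative XSTATE_* values from the earlier sketch rather than Xen's
headers:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative component masks (see the sketch after the diffstat). */
    #define XSTATE_NONLAZY ((1ULL << 62) | (1ULL << 3) | (1ULL << 4))
    #define XSTATE_ALL     (~(1ULL << 63))
    #define XSTATE_LAZY    (XSTATE_ALL & ~XSTATE_NONLAZY)

    /* Model of vcpu_xsave_mask(): which components XSAVE must write. */
    static uint64_t xsave_mask(int fpu_dirtied, int nonlazy_used)
    {
        if ( fpu_dirtied )
            return nonlazy_used ? XSTATE_ALL : XSTATE_LAZY;
        return nonlazy_used ? XSTATE_NONLAZY : 0;
    }

    int main(void)
    {
        assert(xsave_mask(1, 1) == XSTATE_ALL);     /* everything live */
        assert(xsave_mask(1, 0) == XSTATE_LAZY);    /* classic lazy-FPU case */
        assert(xsave_mask(0, 1) == XSTATE_NONLAZY); /* LWP/MPX used, no #NM */
        assert(xsave_mask(0, 0) == 0);              /* nothing to save */
        return 0;
    }

The (0, 0) case is exactly the one the adjusted early return in
vcpu_save_fpu() catches, which is why fpu_xsave() can now ASSERT(mask)
unconditionally.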
--
generated by git-patchbot for /home/xen/git/xen.git#master
