[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH 14/14] x86/xstate: Make xstate_all() and vcpu_xsave_mask() take explicit xstate



Pass the xsave area to xstate_all() and vcpu_xsave_mask() as an explicit
parameter rather than having them reach into v->arch, so callers that
already hold a mapped xsave area (via vcpu_map_xsave_area()) hand that
mapping down instead of the helpers re-deriving it.

No functional change.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
---
 xen/arch/x86/i387.c               | 9 +++++----
 xen/arch/x86/include/asm/xstate.h | 5 +++--
 xen/arch/x86/xstate.c             | 2 +-
 3 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 7e1fb8ad8779..87b44dc11b55 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -108,7 +108,8 @@ static inline void fpu_fxrstor(struct vcpu *v, const fpusse_t *fpu_ctxt)
 /*      FPU Save Functions     */
 /*******************************/
 
-static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
+static inline uint64_t vcpu_xsave_mask(const struct vcpu *v,
+                                       const struct xsave_struct *xsave_area)
 {
     if ( v->fpu_dirtied )
         return v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;
@@ -125,14 +126,14 @@ static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
      * XSTATE_FP_SSE), vcpu_xsave_mask will return XSTATE_ALL. Otherwise
      * return XSTATE_NONLAZY.
      */
-    return xstate_all(v) ? XSTATE_ALL : XSTATE_NONLAZY;
+    return xstate_all(v, xsave_area) ? XSTATE_ALL : XSTATE_NONLAZY;
 }
 
 /* Save x87 extended state */
 static inline void fpu_xsave(struct vcpu *v, struct xsave_struct *xsave_area)
 {
     bool ok;
-    uint64_t mask = vcpu_xsave_mask(v);
+    uint64_t mask = vcpu_xsave_mask(v, xsave_area);
 
     ASSERT(mask);
     ASSERT(cpu_has_xsave);
@@ -213,7 +214,7 @@ void vcpu_restore_fpu_nonlazy(struct vcpu *v, bool need_stts)
      * saving state belonging to another vCPU.
      */
     xsave_area = vcpu_map_xsave_area(v);
-    if ( v->arch.fully_eager_fpu || xstate_all(v) )
+    if ( v->arch.fully_eager_fpu || xstate_all(v, xsave_area) )
     {
         if ( cpu_has_xsave )
             fpu_xrstor(v, xsave_area, XSTATE_ALL);
diff --git a/xen/arch/x86/include/asm/xstate.h b/xen/arch/x86/include/asm/xstate.h
index 43f7731c2b17..81350d0105bb 100644
--- a/xen/arch/x86/include/asm/xstate.h
+++ b/xen/arch/x86/include/asm/xstate.h
@@ -132,14 +132,15 @@ xsave_area_compressed(const struct xsave_struct *xsave_area)
     return xsave_area->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED;
 }
 
-static inline bool xstate_all(const struct vcpu *v)
+static inline bool xstate_all(const struct vcpu *v,
+                              const struct xsave_struct *xsave_area)
 {
     /*
      * XSTATE_FP_SSE may be excluded, because the offsets of XSTATE_FP_SSE
      * (in the legacy region of xsave area) are fixed, so saving
      * XSTATE_FP_SSE will not cause overwriting problem with XSAVES/XSAVEC.
      */
-    return xsave_area_compressed(v->arch.xsave_area) &&
+    return xsave_area_compressed(xsave_area) &&
            (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE);
 }
 
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index aa5c062f7e51..cbe56eba89eb 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -1002,7 +1002,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
             asm ( "stmxcsr %0" : "=m" (xsave_area->fpu_sse.mxcsr) );
             vcpu_unmap_xsave_area(curr, xsave_area);
         }
-        else if ( xstate_all(curr) )
+        else if ( xstate_all(curr, xsave_area) )
         {
             /* See the comment in i387.c:vcpu_restore_fpu_eager(). */
             mask |= XSTATE_LAZY;
-- 
2.47.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.