
[Xen-devel] [V3] x86/xsaves: fix overwriting between non-lazy/lazy xsaves



The offsets at which components are saved by xsave[sc] are not fixed.
So when a save with v->fpu_dirtied set is followed by one with
v->fpu_dirtied clear, the non-lazy xsave[sc] may overwrite data
written by the lazy one.
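
For reference, here is a minimal sketch (illustrative only, not part of the
patch; the helper and macro names are hypothetical) of how a compacted-format
offset depends on which lower-numbered components are present in xcomp_bv.
Component sizes would normally come from CPUID leaf 0xD, and the optional
64-byte alignment that some components request is ignored for brevity:

    #include <stdint.h>

    #define LEGACY_REGION_SIZE  512   /* x87/SSE legacy area, fixed layout */
    #define XSAVE_HDR_SIZE       64   /* header holding xstate_bv/xcomp_bv */

    /* Offset of extended component 'comp' in a compacted-format save area. */
    static unsigned int compacted_offset(uint64_t xcomp_bv, unsigned int comp,
                                         const unsigned int *comp_size)
    {
        unsigned int offset = LEGACY_REGION_SIZE + XSAVE_HDR_SIZE;
        unsigned int i;

        /* Components 0 and 1 (x87, SSE) always live in the legacy region. */
        for ( i = 2; i < comp; i++ )
            if ( xcomp_bv & (1ULL << i) )
                offset += comp_size[i];

        return offset;
    }

With a 256-byte YMM component (bit 2), for example, any component saved after
it starts 256 bytes further into the area when bit 2 is set in xcomp_bv than
when it is clear; a later xsaves that omits the lazy bits therefore writes its
non-lazy data on top of the previously saved lazy data.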

The solution is, when xsave[sc] is enabled, to take xcr0_accum into
consideration: if the guest has ever used any state in
XSTATE_LAZY & ~XSTATE_FP_SSE, vcpu_xsave_mask will return XSTATE_ALL;
otherwise it will return XSTATE_NONLAZY. XSTATE_FP_SSE is excluded
because xsave[sc] writes the XSTATE_FP_SSE part into the legacy region
of the xsave area, whose layout is fixed, so saving XSTATE_FP_SSE
cannot cause the overwriting problem.
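
Restated in isolation (a sketch reusing the Xen macros from the patch; the
real change is the vcpu_xsave_mask() hunk in i387.c below), the decision is:

    static uint64_t xsave_mask_sketch(const struct vcpu *v)
    {
        /* Lazy state dirty: this branch is unchanged by the patch. */
        if ( v->fpu_dirtied )
            return v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;

        /*
         * Lazy state clean.  Under xsaves, still re-save everything if the
         * guest ever used a lazy component other than FP/SSE, so the
         * compacted write cannot land on previously saved lazy data.
         */
        return cpu_has_xsaves && (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE)
               ? XSTATE_ALL : XSTATE_NONLAZY;
    }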

This may cause redundant saving of lazy states, which has a performance
impact. As the xsavec support code will be cleaned up (the reason is
given below), this patch only takes xsaves into consideration.

Performance tests of xsavec versus xsaveopt (suggested by Jan) show
that xsaveopt performs better than xsavec, so this patch also cleans up
the xsavec support code in Xen.

xsaves is also disabled (it will be used again once supervisor state
support is introduced). This patch does not change xc_cpuid_config_xsave
in tools/libxc/xc_cpuid_x86.c, but adds a check in hvm_cpuid instead.
The next time xsaves is needed, only a small addition to xstate_init
will be required.

Signed-off-by: Shuai Ruan <shuai.ruan@xxxxxxxxxxxxxxx>
Reported-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v3:
1. Add xsavec clean-up code and disable xsaves.
2. Add a comment explaining which mask is returned by vcpu_xsave_mask and why.

v2:
Add the performance impact and the next steps to the description.

 xen/arch/x86/domctl.c  |  2 +-
 xen/arch/x86/hvm/hvm.c |  3 +++
 xen/arch/x86/i387.c    | 14 +++++++++++++-
 xen/arch/x86/xstate.c  | 16 ++++++----------
 4 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 55aecdc..d71541c 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -922,7 +922,7 @@ long arch_do_domctl(
                 ret = -EFAULT;
 
             offset += sizeof(v->arch.xcr0_accum);
-            if ( !ret && (cpu_has_xsaves || cpu_has_xsavec) )
+            if ( !ret && cpu_has_xsaves )
             {
                 void *xsave_area;
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5bc2812..8264ff8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4762,7 +4762,10 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                             *ebx += xstate_sizes[sub_leaf];
             }
             else
+            {
+                *eax &= ~(cpufeat_mask(X86_FEATURE_XSAVES) | cpufeat_mask(X86_FEATURE_XSAVEC));
                 *ebx = *ecx = *edx = 0;
+            }
         }
         break;
 
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index c29d0fa..651b6b8 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -118,7 +118,19 @@ static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
     if ( v->fpu_dirtied )
         return v->arch.nonlazy_xstate_used ? XSTATE_ALL : XSTATE_LAZY;
 
-    return v->arch.nonlazy_xstate_used ? XSTATE_NONLAZY : 0;
+    /*
+     * The offsets of components in the extended region of an xsave area
+     * written by xsaves are not fixed. This may cause the xsave area to be
+     * overwritten when a save with v->fpu_dirtied set is followed by one
+     * with v->fpu_dirtied clear. The way to solve this problem is to take
+     * xcr0_accum into consideration: if the guest has ever used lazy states
+     * (excluding XSTATE_FP_SSE), vcpu_xsave_mask will return XSTATE_ALL;
+     * otherwise it returns XSTATE_NONLAZY. XSTATE_FP_SSE is excluded because
+     * its offsets (in the legacy region of the xsave area) are fixed, so
+     * saving XSTATE_FP_SSE using xsaves cannot cause the overwriting problem.
+     */
+    return cpu_has_xsaves && (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE) ?
+           XSTATE_ALL : XSTATE_NONLAZY;
 }
 
 /* Save x87 extended state */
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 8316bd9..c57f3a4 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -165,7 +165,7 @@ void expand_xsave_states(struct vcpu *v, void *dest, unsigned int size)
     u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
     u64 valid;
 
-    if ( !cpu_has_xsaves && !cpu_has_xsavec )
+    if ( !cpu_has_xsaves )
     {
         memcpy(dest, xsave, size);
         return;
@@ -206,7 +206,7 @@ void compress_xsave_states(struct vcpu *v, const void *src, unsigned int size)
     u64 xstate_bv = ((const struct xsave_struct *)src)->xsave_hdr.xstate_bv;
     u64 valid;
 
-    if ( !cpu_has_xsaves && !cpu_has_xsavec )
+    if ( !cpu_has_xsaves )
     {
         memcpy(xsave, src, size);
         return;
@@ -251,11 +251,9 @@ void xsave(struct vcpu *v, uint64_t mask)
     uint32_t lmask = mask;
     unsigned int fip_width = v->domain->arch.x87_fip_width;
 #define XSAVE(pfx) \
-        alternative_io_3(".byte " pfx "0x0f,0xae,0x27\n", /* xsave */ \
+        alternative_io_2(".byte " pfx "0x0f,0xae,0x27\n", /* xsave */ \
                          ".byte " pfx "0x0f,0xae,0x37\n", /* xsaveopt */ \
                          X86_FEATURE_XSAVEOPT, \
-                         ".byte " pfx "0x0f,0xc7,0x27\n", /* xsavec */ \
-                         X86_FEATURE_XSAVEC, \
                          ".byte " pfx "0x0f,0xc7,0x2f\n", /* xsaves */ \
                          X86_FEATURE_XSAVES, \
                          "=m" (*ptr), \
@@ -409,7 +407,7 @@ void xrstor(struct vcpu *v, uint64_t mask)
                   ((mask & XSTATE_YMM) &&
                    !(ptr->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED))) )
                 ptr->fpu_sse.mxcsr &= mxcsr_mask;
-            if ( cpu_has_xsaves || cpu_has_xsavec )
+            if ( cpu_has_xsaves )
             {
                 ptr->xsave_hdr.xcomp_bv &= this_cpu(xcr0) | this_cpu(xss);
                 ptr->xsave_hdr.xstate_bv &= ptr->xsave_hdr.xcomp_bv;
@@ -565,9 +563,7 @@ void xstate_init(struct cpuinfo_x86 *c)
 
     /* Mask out features not currently understood by Xen. */
     eax &= (cpufeat_mask(X86_FEATURE_XSAVEOPT) |
-            cpufeat_mask(X86_FEATURE_XSAVEC) |
-            cpufeat_mask(X86_FEATURE_XGETBV1) |
-            cpufeat_mask(X86_FEATURE_XSAVES));
+            cpufeat_mask(X86_FEATURE_XGETBV1));
 
     c->x86_capability[cpufeat_word(X86_FEATURE_XSAVEOPT)] = eax;
 
@@ -575,7 +571,7 @@ void xstate_init(struct cpuinfo_x86 *c)
 
     if ( setup_xstate_features(bsp) && bsp )
         BUG();
-    if ( bsp && (cpu_has_xsaves || cpu_has_xsavec) )
+    if ( bsp && cpu_has_xsaves )
         setup_xstate_comp();
 }
 
-- 
1.9.1

