
[Xen-devel] [PATCH v2 5/6] x86/domctl: Fix migration of guests which are not using xsave



c/s da62246e "x86/xsaves: enable xsaves/xrstors/xsavec in xen" broke migration
of PV guests which were not using xsave.

In such a case, compress_xsave_states() gets passed a zero-length buffer.  The
first thing it does is ASSERT() on user-provided data, having already wandered
off the end of the buffer in order to read that data.

Perform more verification of the input buffer before passing it to
compress_xsave_states().  This involves making xsave_area_compressed() public.
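
Distilled out of its arch_do_domctl() context, the new PV-side check order is
sketched below.  This is a standalone illustration only: the constants and the
xsave header layout are simplified stand-ins for Xen's real definitions,
expected_size stands in for PV_XSAVE_SIZE(_xcr0_accum), and
validate_pv_xsave_record() is a hypothetical helper, not something the patch
adds.

    /* Sketch of the new PV-side validation order (simplified types/sizes). */
    #include <assert.h>
    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
    #define PV_XSAVE_HDR_SIZE         16u          /* xcr0 + xcr0_accum */
    #define XSTATE_AREA_MIN_SIZE      (512u + 64u) /* FXSAVE area + xsave hdr */

    struct xsave_hdr { uint64_t xstate_bv, xcomp_bv, reserved[6]; };

    /*
     * Returns 0 if the record is acceptable (*restore says whether there is
     * anything to hand to compress_xsave_states()), or -errno to reject it.
     */
    static int validate_pv_xsave_record(unsigned int size,
                                        unsigned int expected_size,
                                        const struct xsave_hdr *hdr,
                                        bool *restore)
    {
        *restore = false;

        if ( size == PV_XSAVE_HDR_SIZE )
            return 0;                   /* Nothing to restore. */
        if ( size < PV_XSAVE_HDR_SIZE + XSTATE_AREA_MIN_SIZE )
            return -EINVAL;             /* Can't be legitimate data. */
        if ( hdr->xcomp_bv & XSTATE_COMPACTION_ENABLED )
            return -EOPNOTSUPP;         /* Compressed data isn't supported. */
        if ( size != expected_size )
            return -EINVAL;             /* Not legitimate data. */

        *restore = true;                /* Safe to restore. */
        return 0;
    }

    int main(void)
    {
        const struct xsave_hdr hdr = { .xstate_bv = 0x3 };
        bool restore;

        /* Header-only record (guest never used xsave): accepted, no restore. */
        assert(!validate_pv_xsave_record(PV_XSAVE_HDR_SIZE, 0, &hdr, &restore) &&
               !restore);
        /* Short but non-empty record: rejected outright. */
        assert(validate_pv_xsave_record(64, 0, &hdr, &restore) == -EINVAL);

        return 0;
    }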

Similar problems exist on the HVM side, so make equivalent adjustments there.
This doesn't manifest in general, as hvm_save_cpu_xsave_states() elides the
entire record if xsave isn't in use, but it would be a problem if a caller
constructed an xsave record manually.
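
In the same simplified terms (reusing the types and constants from the sketch
above, with another hypothetical helper name, and expected_size standing in for
HVM_CPU_XSAVE_SIZE(ctxt->xcr0_accum)), the HVM-side ordering differs in that
the longer-than-needed records sent by Xen 4.3.0, 4.2.3 and older are tolerated
by clamping the descriptor length before the remaining checks.  Only the parts
touched by the hvm.c hunks are reflected here; the pre-existing sanity checks
earlier in hvm_load_cpu_xsave_states() are omitted.

    /* Sketch of the HVM-side ordering; types as in the PV sketch above. */
    static int validate_hvm_xsave_record(unsigned int desc_length,
                                         unsigned int expected_size,
                                         const struct xsave_hdr *hdr)
    {
        if ( desc_length > expected_size )
            desc_length = expected_size;  /* Ignore the extraneous zeros. */

        if ( hdr->xcomp_bv & XSTATE_COMPACTION_ENABLED )
            return -EOPNOTSUPP;           /* Compressed data isn't supported. */
        if ( desc_length != expected_size )
            return -EINVAL;               /* Truncated or malformed record. */

        return 0;                         /* Safe to restore. */
    }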

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Jan Beulich <JBeulich@xxxxxxxx>
---
v2:
 * Use a shadow variable in hvm_load_cpu_xsave_states() rather than mutating
   the stream itself.  Make desc const while at it.
 * Alter the alignment of xsave_area_compressed()
---
 xen/arch/x86/domctl.c        | 12 +++++++++---
 xen/arch/x86/hvm/hvm.c       | 27 ++++++++++++++++++++++-----
 xen/arch/x86/xstate.c        |  6 ------
 xen/include/asm-x86/xstate.h |  6 ++++++
 4 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 5aa9f3a..2a2fe04 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1158,7 +1158,15 @@ long arch_do_domctl(
                 goto vcpuextstate_out;
             }
 
-            if ( evc->size <= PV_XSAVE_SIZE(_xcr0_accum) )
+            if ( evc->size == PV_XSAVE_HDR_SIZE )
+                ; /* Nothing to restore. */
+            else if ( evc->size < PV_XSAVE_HDR_SIZE + XSTATE_AREA_MIN_SIZE )
+                ret = -EINVAL; /* Can't be legitimate data. */
+            else if ( xsave_area_compressed(_xsave_area) )
+                ret = -EOPNOTSUPP; /* Don't support compressed data. */
+            else if ( evc->size != PV_XSAVE_SIZE(_xcr0_accum) )
+                ret = -EINVAL; /* Not legitimate data. */
+            else
             {
                 vcpu_pause(v);
                 v->arch.xcr0 = _xcr0;
@@ -1169,8 +1177,6 @@ long arch_do_domctl(
                                       evc->size - PV_XSAVE_HDR_SIZE);
                 vcpu_unpause(v);
             }
-            else
-                ret = -EINVAL;
 
             xfree(receive_buf);
         }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ca96643..7bad845 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1265,8 +1265,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
     int err;
     struct vcpu *v;
     struct hvm_hw_cpu_xsave *ctxt;
-    struct hvm_save_descriptor *desc;
-    unsigned int i, desc_start;
+    const struct hvm_save_descriptor *desc;
+    unsigned int i, desc_start, desc_length;
 
     /* Which vcpu is this? */
     vcpuid = hvm_load_instance(h);
@@ -1325,7 +1325,8 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
         return err;
     }
     size = HVM_CPU_XSAVE_SIZE(ctxt->xcr0_accum);
-    if ( desc->length > size )
+    desc_length = desc->length;
+    if ( desc_length > size )
     {
         /*
          * Xen 4.3.0, 4.2.3 and older used to send longer-than-needed
@@ -1345,6 +1346,23 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
         printk(XENLOG_G_WARNING
                "HVM%d.%u restore mismatch: xsave length %#x > %#x\n",
                d->domain_id, vcpuid, desc->length, size);
+        /* Rewind desc_length to ignore the extraneous zeros. */
+        desc_length = size;
+    }
+
+    if ( xsave_area_compressed((const void *)&ctxt->save_area) )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d.%u restore: compressed xsave state not supported\n",
+               d->domain_id, vcpuid);
+        return -EOPNOTSUPP;
+    }
+    else if ( desc_length != size )
+    {
+        printk(XENLOG_G_WARNING
+               "HVM%d.%u restore mismatch: xsave length %#x != %#x\n",
+               d->domain_id, vcpuid, desc_length, size);
+        return -EINVAL;
     }
     /* Checking finished */
 
@@ -1353,8 +1371,7 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
     if ( ctxt->xcr0_accum & XSTATE_NONLAZY )
         v->arch.nonlazy_xstate_used = 1;
     compress_xsave_states(v, &ctxt->save_area,
-                          min(desc->length, size) -
-                          offsetof(struct hvm_hw_cpu_xsave,save_area));
+                          size - offsetof(struct hvm_hw_cpu_xsave, save_area));
 
     return 0;
 }
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 2684190..ed9c4c7 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -86,12 +86,6 @@ uint64_t get_msr_xss(void)
     return this_cpu(xss);
 }
 
-static bool_t xsave_area_compressed(const struct xsave_struct *xsave_area)
-{
-     return xsave_area && (xsave_area->xsave_hdr.xcomp_bv
-                           & XSTATE_COMPACTION_ENABLED);
-}
-
 static int setup_xstate_features(bool_t bsp)
 {
     unsigned int leaf, eax, ebx, ecx, edx;
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 51a9ed4..45b2a9b 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -131,4 +131,10 @@ static inline bool_t xstate_all(const struct vcpu *v)
            (v->arch.xcr0_accum & XSTATE_LAZY & ~XSTATE_FP_SSE);
 }
 
+static inline bool __nonnull(1)
+xsave_area_compressed(const struct xsave_struct *xsave_area)
+{
+    return xsave_area->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED;
+}
+
 #endif /* __ASM_XSTATE_H */
-- 
2.1.4

