[Xen-devel] [PATCH 10/12] xenpaging: handle HVMCOPY_gfn_paged_out in copy_from/to_user


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Olaf Hering <olaf@xxxxxxxxx>
  • Date: Mon, 10 Jan 2011 17:43:55 +0100
  • Delivery-date: Mon, 10 Jan 2011 08:54:55 -0800
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

copy_from_user_hvm can fail when __hvm_copy returns
HVMCOPY_gfn_paged_out for a referenced gfn, for example during a
guest's pagetable walk.  This case has to be handled so that callers
can distinguish a paged-out gfn from an ordinary copy fault.

For the time being, return -EAGAIN for the most common case (the
xen_balloon driver crashing in the guest) until the recently added
waitqueues can be used.
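
For illustration only (not part of the patch, and every name below is a
hypothetical stand-in rather than a real Xen symbol), a minimal standalone
sketch of the retry pattern the patch aims at: the copy helper maps a
paged-out gfn to -EAGAIN, and the hypercall path retries instead of
treating it as an ordinary copy fault.

/*
 * Standalone sketch, illustration only.  All names are hypothetical
 * stand-ins, not the actual Xen symbols.
 */
#include <stdio.h>

#define EAGAIN 11

enum copy_result { COPY_OK, COPY_GFN_PAGED_OUT };

/* Pretend the first access hits a paged-out gfn, then succeeds. */
static int paged_out_once = 1;

/* Stand-in for copy_from_user_hvm(): 0 on success, -EAGAIN if paged out. */
static long sketch_copy_from_guest(void *dst, unsigned long len)
{
    enum copy_result rc = paged_out_once-- > 0 ? COPY_GFN_PAGED_OUT : COPY_OK;

    (void)dst; (void)len;   /* no real copy in this sketch */
    if ( rc == COPY_GFN_PAGED_OUT )
        return -EAGAIN;     /* tell the caller to retry later */
    return 0;               /* 0 bytes left uncopied */
}

/* Stand-in for do_memory_op(): -EAGAIN means "come back after paging in". */
static long sketch_memory_op(void)
{
    char buf[16];
    long rc = sketch_copy_from_guest(buf, sizeof(buf));

    if ( rc == -EAGAIN )
        return -EAGAIN;     /* real code: hypercall_create_continuation() */
    return rc ? -1 : 0;     /* any other nonzero: genuine copy fault */
}

int main(void)
{
    while ( sketch_memory_op() == -EAGAIN )
        puts("gfn paged out, retrying the hypercall");
    puts("copy completed");
    return 0;
}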

Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>

---
 xen/arch/x86/hvm/hvm.c |    4 ++++
 xen/common/memory.c    |   39 ++++++++++++++++++++++++++++++++++-----
 2 files changed, 38 insertions(+), 5 deletions(-)

--- xen-unstable.hg-4.1.22697.orig/xen/arch/x86/hvm/hvm.c
+++ xen-unstable.hg-4.1.22697/xen/arch/x86/hvm/hvm.c
@@ -2154,6 +2154,8 @@ unsigned long copy_to_user_hvm(void *to,
 
     rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                         len, 0);
+    if ( unlikely(rc == HVMCOPY_gfn_paged_out) )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }
 
@@ -2171,6 +2173,8 @@ unsigned long copy_from_user_hvm(void *t
 #endif
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
+    if ( unlikely(rc == HVMCOPY_gfn_paged_out) )
+        return -EAGAIN;
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
--- xen-unstable.hg-4.1.22697.orig/xen/common/memory.c
+++ xen-unstable.hg-4.1.22697/xen/common/memory.c
@@ -48,6 +48,7 @@ static void increase_reservation(struct
 {
     struct page_info *page;
     unsigned long i;
+    unsigned long ctg_ret;
     xen_pfn_t mfn;
     struct domain *d = a->domain;
 
@@ -81,8 +82,13 @@ static void increase_reservation(struct
         if ( !guest_handle_is_null(a->extent_list) )
         {
             mfn = page_to_mfn(page);
-            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+            ctg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+            if ( unlikely(ctg_ret) )
+            {
+                if ( (long)ctg_ret == -EAGAIN )
+                    a->preempted = 1;
                 goto out;
+            }
         }
     }
 
@@ -94,6 +100,7 @@ static void populate_physmap(struct memo
 {
     struct page_info *page;
     unsigned long i, j;
+    unsigned long cftg_ret;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain;
 
@@ -112,8 +119,13 @@ static void populate_physmap(struct memo
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
+        cftg_ret = __copy_from_guest_offset(&gpfn, a->extent_list, i, 1);
+        if ( unlikely(cftg_ret) )
+        {
+            if ( (long)cftg_ret == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( a->memflags & MEMF_populate_on_demand )
         {
@@ -143,8 +155,13 @@ static void populate_physmap(struct memo
                     set_gpfn_from_mfn(mfn + j, gpfn + j);
 
                 /* Inform the domain of the new page's machine address. */ 
-                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+                cftg_ret = __copy_to_guest_offset(a->extent_list, i, &mfn, 1);
+                if ( unlikely(cftg_ret) )
+                {
+                    if ( (long)cftg_ret == -EAGAIN )
+                        a->preempted = 1;
                     goto out;
+                }
             }
         }
     }
@@ -213,6 +230,7 @@ int guest_remove_page(struct domain *d,
 static void decrease_reservation(struct memop_args *a)
 {
     unsigned long i, j;
+    unsigned long cfg_ret;
     xen_pfn_t gmfn;
 
     if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
@@ -227,8 +245,13 @@ static void decrease_reservation(struct
             goto out;
         }
 
-        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
+        cfg_ret = __copy_from_guest_offset(&gmfn, a->extent_list, i, 1);
+        if ( unlikely(cfg_ret) )
+        {
+            if ( (long)cfg_ret == -EAGAIN )
+                a->preempted = 1;
             goto out;
+        }
 
         if ( tb_init_done )
         {
@@ -509,6 +532,7 @@ long do_memory_op(unsigned long cmd, XEN
     int rc, op;
     unsigned int address_bits;
     unsigned long start_extent;
+    unsigned long cfg_ret;
     struct xen_memory_reservation reservation;
     struct memop_args args;
     domid_t domid;
@@ -522,8 +546,13 @@ long do_memory_op(unsigned long cmd, XEN
     case XENMEM_populate_physmap:
         start_extent = cmd >> MEMOP_EXTENT_SHIFT;
 
-        if ( copy_from_guest(&reservation, arg, 1) )
+        cfg_ret = copy_from_guest(&reservation, arg, 1);
+        if ( unlikely(cfg_ret) )
+        {
+            if ( (long)cfg_ret == -EAGAIN )
+                return hypercall_create_continuation(__HYPERVISOR_memory_op, "lh", cmd, arg);
             return start_extent;
+        }
 
         /* Is size too large for us to encode a continuation? */
         if ( reservation.nr_extents > (ULONG_MAX >> MEMOP_EXTENT_SHIFT) )

