
[Xen-changelog] [xen-unstable] [XEN] memory_op hypercall does not use guest_handle_add_offset().



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 5cdd4da17036cb5c5a9353a0d341c18279706c5e
# Parent  f5b98471d6ffdf3714acdf5a878e4cf31149c369
[XEN] memory_op hypercall does not use guest_handle_add_offset().

It was causing compatibility issues across architectures: on x86 the
offset added to a guest handle would not persist across a hypercall
continuation, whereas on ia64 and powerpc (both of which use xencomm)
it would. This patch sidesteps the whole issue by recording progress
as an absolute extent count (nr_done) and resuming loops from there,
rather than by offsetting the guest handle.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/common/memory.c |  289 +++++++++++++++++++++++-----------------------------
 1 files changed, 131 insertions(+), 158 deletions(-)
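
As a standalone illustration (not part of the patch), here is a minimal
compilable sketch of the restart pattern that the new memop_args
structure enables: each loop resumes from the absolute index nr_done
and indexes the original, unmodified guest handle, so no offset needs
to survive a continuation. All demo_* names are hypothetical; the
budget counter crudely models hypercall_preempt_check().

#include <stdio.h>

struct demo_args {
    unsigned int nr_extents;   /* total extents requested (INPUT) */
    unsigned int nr_done;      /* extents finished so far (IN/OUT) */
    int preempted;             /* set when the call must yield (OUT) */
};

static void demo_process(struct demo_args *a)
{
    unsigned int i, budget = 3;

    /* Resume from the absolute index nr_done; the guest handle (not
     * modelled here) would be indexed with i directly, so no offset
     * has to persist across the continuation. */
    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        /* Stand-in for hypercall_preempt_check(): pretend we are
         * preempted after three extents per entry. */
        if ( budget-- == 0 )
        {
            a->preempted = 1;
            break;             /* extent i is not yet processed */
        }
        printf("processing extent %u\n", i);
    }
    a->nr_done = i;            /* absolute progress, as in memop_args */
}

int main(void)
{
    struct demo_args a = { .nr_extents = 7, .nr_done = 0, .preempted = 0 };

    /* Each iteration models one (re)entry of the hypercall; nr_done is
     * the only state carried across the simulated continuation. */
    do {
        a.preempted = 0;
        demo_process(&a);
    } while ( a.preempted );

    printf("done: %u of %u extents\n", a.nr_done, a.nr_extents);
    return 0;
}

Compare this with the old code, which advanced the handle with
guest_handle_add_offset() and relied on that offset persisting across
the continuation -- true under xencomm, false on x86.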

diff -r f5b98471d6ff -r 5cdd4da17036 xen/common/memory.c
--- a/xen/common/memory.c       Fri Nov 10 13:09:01 2006 +0000
+++ b/xen/common/memory.c       Fri Nov 10 14:22:17 2006 +0000
@@ -29,98 +29,105 @@
  */
 #define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */
 
-static long
-increase_reservation(
-    struct domain *d, 
-    XEN_GUEST_HANDLE(xen_pfn_t) extent_list,
-    unsigned int   nr_extents,
-    unsigned int   extent_order,
-    unsigned int   memflags,
-    int           *preempted)
+struct memop_args {
+    /* INPUT */
+    struct domain *domain;     /* Domain to be affected. */
+    XEN_GUEST_HANDLE(xen_pfn_t) extent_list; /* List of extent base addrs. */
+    unsigned int nr_extents;   /* Number of extents to allocate or free. */
+    unsigned int extent_order; /* Size of each extent. */
+    unsigned int memflags;     /* Allocation flags. */
+
+    /* INPUT/OUTPUT */
+    unsigned int nr_done;    /* Number of extents processed so far. */
+    int          preempted;  /* Was the hypercall preempted? */
+};
+
+static unsigned int select_local_cpu(struct domain *d)
+{
+    struct vcpu *v = d->vcpu[0];
+    return (v ? v->processor : 0);
+}
+
+static void increase_reservation(struct memop_args *a)
 {
     struct page_info *page;
     unsigned long i;
     xen_pfn_t mfn;
-    /* use domain's first processor for locality parameter */
-    unsigned int cpu = d->vcpu[0]->processor;
-
-    if ( !guest_handle_is_null(extent_list) &&
-         !guest_handle_okay(extent_list, nr_extents) )
-        return 0;
-
-    if ( (extent_order != 0) &&
+    struct domain *d = a->domain;
+    unsigned int cpu = select_local_cpu(d);
+
+    if ( !guest_handle_is_null(a->extent_list) &&
+         !guest_handle_okay(a->extent_list, a->nr_extents) )
+        return;
+
+    if ( (a->extent_order != 0) &&
          !multipage_allocation_permitted(current->domain) )
-        return 0;
-
-    for ( i = 0; i < nr_extents; i++ )
+        return;
+
+    for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
         if ( hypercall_preempt_check() )
         {
-            *preempted = 1;
-            return i;
-        }
-
-        if ( unlikely((page = __alloc_domheap_pages( d, cpu, 
-            extent_order, memflags )) == NULL) ) 
+            a->preempted = 1;
+            goto out;
+        }
+
+        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+        if ( unlikely(page == NULL) ) 
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
-                    extent_order, d->domain_id, memflags, i, nr_extents);
-            return i;
+                     a->extent_order, d->domain_id, a->memflags,
+                     i, a->nr_extents);
+            goto out;
         }
 
         /* Inform the domain of the new page's machine address. */ 
-        if ( !guest_handle_is_null(extent_list) )
+        if ( !guest_handle_is_null(a->extent_list) )
         {
             mfn = page_to_mfn(page);
-            if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
-                return i;
-        }
-    }
-
-    return nr_extents;
-}
-
-static long
-populate_physmap(
-    struct domain *d, 
-    XEN_GUEST_HANDLE(xen_pfn_t) extent_list,
-    unsigned int  nr_extents,
-    unsigned int  extent_order,
-    unsigned int  memflags,
-    int          *preempted)
+            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
+                goto out;
+        }
+    }
+
+ out:
+    a->nr_done = i;
+}
+
+static void populate_physmap(struct memop_args *a)
 {
     struct page_info *page;
     unsigned long i, j;
-    xen_pfn_t gpfn;
-    xen_pfn_t mfn;
-    /* use domain's first processor for locality parameter */
-    unsigned int cpu = d->vcpu[0]->processor;
-
-    if ( !guest_handle_okay(extent_list, nr_extents) )
-        return 0;
-
-    if ( (extent_order != 0) &&
+    xen_pfn_t gpfn, mfn;
+    struct domain *d = a->domain;
+    unsigned int cpu = select_local_cpu(d);
+
+    if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
+        return;
+
+    if ( (a->extent_order != 0) &&
          !multipage_allocation_permitted(current->domain) )
-        return 0;
-
-    for ( i = 0; i < nr_extents; i++ )
+        return;
+
+    for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
         if ( hypercall_preempt_check() )
         {
-            *preempted = 1;
-            goto out;
-        }
-
-        if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) )
-            goto out;
-
-        if ( unlikely((page = __alloc_domheap_pages( d, cpu, 
-            extent_order, memflags )) == NULL) ) 
+            a->preempted = 1;
+            goto out;
+        }
+
+        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
+            goto out;
+
+        page = __alloc_domheap_pages(d, cpu, a->extent_order, a->memflags);
+        if ( unlikely(page == NULL) ) 
         {
             gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
-                    "id=%d memflags=%x (%ld of %d)\n",
-                    extent_order, d->domain_id, memflags, i, nr_extents);
+                     "id=%d memflags=%x (%ld of %d)\n",
+                     a->extent_order, d->domain_id, a->memflags,
+                     i, a->nr_extents);
             goto out;
         }
 
@@ -128,28 +135,25 @@ populate_physmap(
 
         if ( unlikely(shadow_mode_translate(d)) )
         {
-            for ( j = 0; j < (1 << extent_order); j++ )
+            for ( j = 0; j < (1 << a->extent_order); j++ )
                 guest_physmap_add_page(d, gpfn + j, mfn + j);
         }
         else
         {
-            for ( j = 0; j < (1 << extent_order); j++ )
+            for ( j = 0; j < (1 << a->extent_order); j++ )
                 set_gpfn_from_mfn(mfn + j, gpfn + j);
 
             /* Inform the domain of the new page's machine address. */ 
-            if ( unlikely(__copy_to_guest_offset(extent_list, i, &mfn, 1)) )
+            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                 goto out;
         }
     }
 
  out:
-    return i;
-}
-
-int
-guest_remove_page(
-    struct domain *d,
-    unsigned long gmfn)
+    a->nr_done = i;
+}
+
+int guest_remove_page(struct domain *d, unsigned long gmfn)
 {
     struct page_info *page;
     unsigned long mfn;
@@ -191,43 +195,35 @@ guest_remove_page(
     return 1;
 }
 
-static long
-decrease_reservation(
-    struct domain *d,
-    XEN_GUEST_HANDLE(xen_pfn_t) extent_list,
-    unsigned int   nr_extents,
-    unsigned int   extent_order,
-    int           *preempted)
+static void decrease_reservation(struct memop_args *a)
 {
     unsigned long i, j;
     xen_pfn_t gmfn;
 
-    if ( !guest_handle_okay(extent_list, nr_extents) )
-        return 0;
-
-    for ( i = 0; i < nr_extents; i++ )
+    if ( !guest_handle_okay(a->extent_list, a->nr_extents) )
+        return;
+
+    for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
         if ( hypercall_preempt_check() )
         {
-            *preempted = 1;
-            return i;
-        }
-
-        if ( unlikely(__copy_from_guest_offset(&gmfn, extent_list, i, 1)) )
-            return i;
-
-        for ( j = 0; j < (1 << extent_order); j++ )
-        {
-            if ( !guest_remove_page(d, gmfn + j) )
-                return i;
-        }
-    }
-
-    return nr_extents;
-}
-
-static long
-translate_gpfn_list(
+            a->preempted = 1;
+            goto out;
+        }
+
+        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
+            goto out;
+
+        for ( j = 0; j < (1 << a->extent_order); j++ )
+            if ( !guest_remove_page(a->domain, gmfn + j) )
+                goto out;
+    }
+
+ out:
+    a->nr_done = i;
+}
+
+static long translate_gpfn_list(
     XEN_GUEST_HANDLE(xen_translate_gpfn_list_t) uop, unsigned long *progress)
 {
     struct xen_translate_gpfn_list op;
@@ -289,8 +285,7 @@ translate_gpfn_list(
     return 0;
 }
 
-static long
-memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
+static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
 {
     struct xen_memory_exchange exch;
     LIST_HEAD(in_chunk_list);
@@ -341,24 +336,15 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
         memflags = MEMF_dma;
     }
 
-    guest_handle_add_offset(exch.in.extent_start, exch.nr_exchanged);
-    exch.in.nr_extents -= exch.nr_exchanged;
-
     if ( exch.in.extent_order <= exch.out.extent_order )
     {
         in_chunk_order  = exch.out.extent_order - exch.in.extent_order;
         out_chunk_order = 0;
-        guest_handle_add_offset(
-            exch.out.extent_start, exch.nr_exchanged >> in_chunk_order);
-        exch.out.nr_extents -= exch.nr_exchanged >> in_chunk_order;
     }
     else
     {
         in_chunk_order  = 0;
         out_chunk_order = exch.in.extent_order - exch.out.extent_order;
-        guest_handle_add_offset(
-            exch.out.extent_start, exch.nr_exchanged << out_chunk_order);
-        exch.out.nr_extents -= exch.nr_exchanged << out_chunk_order;
     }
 
     /*
@@ -372,14 +358,15 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
     }
     d = current->domain;
 
-    /* use domain's first processor for locality parameter */
-    cpu = d->vcpu[0]->processor;
-
-    for ( i = 0; i < (exch.in.nr_extents >> in_chunk_order); i++ )
+    cpu = select_local_cpu(d);
+
+    for ( i = (exch.nr_exchanged >> in_chunk_order);
+          i < (exch.in.nr_extents >> in_chunk_order);
+          i++ )
     {
         if ( hypercall_preempt_check() )
         {
-            exch.nr_exchanged += i << in_chunk_order;
+            exch.nr_exchanged = i << in_chunk_order;
             if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
                 return -EFAULT;
             return hypercall_create_continuation(
@@ -420,8 +407,8 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
         /* Allocate a chunk's worth of anonymous output pages. */
         for ( j = 0; j < (1UL << out_chunk_order); j++ )
         {
-            page = __alloc_domheap_pages( NULL, cpu, 
-                  exch.out.extent_order, memflags);
+            page = __alloc_domheap_pages(
+                NULL, cpu, exch.out.extent_order, memflags);
             if ( unlikely(page == NULL) )
             {
                 rc = -ENOMEM;
@@ -480,7 +467,7 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
         BUG_ON(j != (1UL << out_chunk_order));
     }
 
-    exch.nr_exchanged += exch.in.nr_extents;
+    exch.nr_exchanged = exch.in.nr_extents;
     if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
         rc = -EFAULT;
     return rc;
@@ -507,7 +494,7 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
         free_domheap_pages(page, exch.out.extent_order);
     }
 
-    exch.nr_exchanged += i << in_chunk_order;
+    exch.nr_exchanged = i << in_chunk_order;
 
  fail_early:
     if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
@@ -518,10 +505,10 @@ long do_memory_op(unsigned long cmd, XEN
 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE(void) arg)
 {
     struct domain *d;
-    int rc, op, preempted = 0;
-    unsigned int memflags = 0;
+    int rc, op;
     unsigned long start_extent, progress;
     struct xen_memory_reservation reservation;
+    struct memop_args args;
     domid_t domid;
 
     op = cmd & ((1 << START_EXTENT_SHIFT) - 1);
@@ -543,9 +530,12 @@ long do_memory_op(unsigned long cmd, XEN
         if ( unlikely(start_extent > reservation.nr_extents) )
             return start_extent;
 
-        if ( !guest_handle_is_null(reservation.extent_start) )
-            guest_handle_add_offset(reservation.extent_start, start_extent);
-        reservation.nr_extents -= start_extent;
+        args.extent_list  = reservation.extent_start;
+        args.nr_extents   = reservation.nr_extents;
+        args.extent_order = reservation.extent_order;
+        args.nr_done      = start_extent;
+        args.preempted    = 0;
+        args.memflags     = 0;
 
         if ( (reservation.address_bits != 0) &&
              (reservation.address_bits <
@@ -553,7 +543,7 @@ long do_memory_op(unsigned long cmd, XEN
         {
             if ( reservation.address_bits < 31 )
                 return start_extent;
-            memflags = MEMF_dma;
+            args.memflags = MEMF_dma;
         }
 
         if ( likely(reservation.domid == DOMID_SELF) )
@@ -561,44 +551,27 @@ long do_memory_op(unsigned long cmd, XEN
         else if ( !IS_PRIV(current->domain) ||
                   ((d = find_domain_by_id(reservation.domid)) == NULL) )
             return start_extent;
+        args.domain = d;
 
         switch ( op )
         {
         case XENMEM_increase_reservation:
-            rc = increase_reservation(
-                d,
-                reservation.extent_start,
-                reservation.nr_extents,
-                reservation.extent_order,
-                memflags,
-                &preempted);
+            increase_reservation(&args);
             break;
         case XENMEM_decrease_reservation:
-            rc = decrease_reservation(
-                d,
-                reservation.extent_start,
-                reservation.nr_extents,
-                reservation.extent_order,
-                &preempted);
+            decrease_reservation(&args);
             break;
-        case XENMEM_populate_physmap:
-        default:
-            rc = populate_physmap(
-                d,
-                reservation.extent_start,
-                reservation.nr_extents,
-                reservation.extent_order,
-                memflags,
-                &preempted);
+        default: /* XENMEM_populate_physmap */
+            populate_physmap(&args);
             break;
         }
 
         if ( unlikely(reservation.domid != DOMID_SELF) )
             put_domain(d);
 
-        rc += start_extent;
-
-        if ( preempted )
+        rc = args.nr_done;
+
+        if ( args.preempted )
             return hypercall_create_continuation(
                 __HYPERVISOR_memory_op, "lh",
                 op | (rc << START_EXTENT_SHIFT), arg);
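
For completeness, a minimal sketch (again not Xen code) of the cmd
encoding used when the continuation re-enters do_memory_op: the sub-op
occupies the low START_EXTENT_SHIFT bits and the resume point occupies
the rest, matching the "cmd[:4] == start_extent" comment and the
op | (rc << START_EXTENT_SHIFT) expression above. The encode/decode
helper names are invented for the example, and
XENMEM_increase_reservation is assumed to be sub-op 0.

#include <assert.h>
#include <stdio.h>

#define START_EXTENT_SHIFT 4   /* cmd[:4] == start_extent */

static unsigned long encode_cmd(int op, unsigned long start_extent)
{
    /* As in hypercall_create_continuation(): op | (rc << shift). */
    return (unsigned long)op | (start_extent << START_EXTENT_SHIFT);
}

static void decode_cmd(unsigned long cmd, int *op, unsigned long *start_extent)
{
    /* Mirrors the unpacking at the top of do_memory_op(). */
    *op           = cmd & ((1 << START_EXTENT_SHIFT) - 1);
    *start_extent = cmd >> START_EXTENT_SHIFT;
}

int main(void)
{
    int op;
    unsigned long start;

    /* Suppose 131 extents were done before preemption (rc == 131). */
    unsigned long cmd = encode_cmd(/* XENMEM_increase_reservation */ 0, 131);

    decode_cmd(cmd, &op, &start);
    assert(op == 0 && start == 131);
    printf("op=%d resumes at extent %lu\n", op, start);
    return 0;
}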
