
Re: [Xen-devel] [PATCH v4 1/6] xen: improve changes to xen_add_to_physmap



On Mon, 2012-09-17 at 11:54 +0100, Stefano Stabellini wrote:
> On Sat, 15 Sep 2012, Mukesh Rathor wrote:
> > On Fri, 14 Sep 2012 13:59:47 +0100
> > Ian Campbell <Ian.Campbell@xxxxxxxxxx> wrote:
> > 
> > > 
> > > On Wed, 2012-08-22 at 12:08 +0100, Stefano Stabellini wrote:
> > > > This is an incremental patch on top of
> > > > c0bc926083b5987a3e9944eec2c12ad0580100e2: in order to retain binary
> > > > compatibility, it is better to introduce foreign_domid as part of a
> > > > union containing both size and foreign_domid.
> > > [...]
> > > > -    domid_t foreign_domid; /* IFF gmfn_foreign */
> > > > +    unsigned int space;
> > > >  
> > > >  #define XENMAPIDX_grant_table_status 0x80000000
> > > 
> > > Was this the final consensus on what this interface ought to look
> > > like?
> > > 
> > > Does it work for PVH too (Mukesh CCd)?
> > 
> > Yes it does. Please lmk if this is the final version asap so I can put it
> > in my patch, and also test it.
> 
> It is the final version. I think it should go in, unless we want to drop it
> entirely in favor of xen_add_to_physmap_range.

Here is what I came up with on the ARM side. I'd imagine the x86 version
would not be that different.

Processing the batch in reverse order allows for continuations without
needing an explicit "pdone"-style progress field. Still seems a bit wrong
somehow.
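
For illustration, a guest-side caller might end up looking roughly like
this (just a sketch assuming Linux-style hypercall wrappers; the helper
name map_foreign_gmfns and its arguments are made up, not part of the
patch):

    /* Assumes the usual Linux Xen headers, e.g. xen/interface/memory.h
     * and asm/xen/hypercall.h. */
    static int map_foreign_gmfns(domid_t foreign_domid, xen_ulong_t *idxs,
                                 xen_pfn_t *gpfns, uint16_t nr)
    {
        struct xen_add_to_physmap_range xatpr = {
            .domid         = DOMID_SELF,
            .space         = XENMAPSPACE_gmfn_foreign,
            .size          = nr,
            .foreign_domid = foreign_domid,
        };

        set_xen_guest_handle(xatpr.idxs, idxs);
        set_xen_guest_handle(xatpr.gpfns, gpfns);

        /* The hypervisor consumes entries from the tail and decrements
         * size; on preemption it creates the continuation itself, so the
         * caller only ever sees the whole batch succeed or fail. */
        return HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatpr);
    }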

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index aec57e5..7d6101c 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -26,6 +26,8 @@
 #include <xen/preempt.h>
 #include <xen/errno.h>
 #include <xen/grant_table.h>
+#include <xen/softirq.h>
+#include <xen/event.h>
 #include <xen/guest_access.h>
 #include <xen/domain_page.h>
 #include <asm/page.h>
@@ -462,14 +464,17 @@ void share_xen_page_with_guest(struct page_info *page,
     spin_unlock(&d->page_alloc_lock);
 }
 
-static int xenmem_add_to_physmap_once(
+static int xenmem_add_to_physmap_one(
     struct domain *d,
-    const struct xen_add_to_physmap *xatp)
+    uint16_t space,
+    domid_t foreign_domid,
+    unsigned long idx,
+    xen_pfn_t gpfn)
 {
-    unsigned long mfn = 0, idx = 0;
+    unsigned long mfn = 0;
     int rc;
 
-    switch ( xatp->space )
+    switch ( space )
     {
     case XENMAPSPACE_grant_table:
         spin_lock(&d->grant_table->lock);
@@ -477,9 +482,8 @@ static int xenmem_add_to_physmap_once(
         if ( d->grant_table->gt_version == 0 )
             d->grant_table->gt_version = 1;
 
-        idx = xatp->idx;
         if ( d->grant_table->gt_version == 2 &&
-                (xatp->idx & XENMAPIDX_grant_table_status) )
+                (idx & XENMAPIDX_grant_table_status) )
         {
             idx &= ~XENMAPIDX_grant_table_status;
             if ( idx < nr_status_frames(d->grant_table) )
@@ -498,33 +502,31 @@ static int xenmem_add_to_physmap_once(
         spin_unlock(&d->grant_table->lock);
         break;
     case XENMAPSPACE_shared_info:
-        if ( xatp->idx == 0 )
+        if ( idx == 0 )
             mfn = virt_to_mfn(d->shared_info);
         break;
     case XENMAPSPACE_gmfn_foreign:
     {
         paddr_t maddr;
         struct domain *od;
-        static int x = 0;
-        rc = rcu_lock_target_domain_by_id(xatp->foreign_domid, &od);
+        rc = rcu_lock_target_domain_by_id(foreign_domid, &od);
         if ( rc < 0 )
             return rc;
-        maddr = p2m_lookup(od, xatp->idx << PAGE_SHIFT);
+
+        maddr = p2m_lookup(od, idx << PAGE_SHIFT);
         if ( maddr == INVALID_PADDR )
         {
-            printk("bad p2m lookup\n");
-            dump_p2m_lookup(od, xatp->idx << PAGE_SHIFT);
+            dump_p2m_lookup(od, idx << PAGE_SHIFT);
             rcu_unlock_domain(od);
             return -EINVAL;
         }
+
         mfn = maddr >> PAGE_SHIFT;
-       if (x && x--) {
-            printk("Mapping dom%d mfn 0x%lx to dom%d mfn 0x%"PRIpaddr"\n",
-                   od->domain_id, mfn, d->domain_id, xatp->gpfn);
-        }
+
         rcu_unlock_domain(od);
         break;
     }
+
     default:
         return -ENOSYS;
     }
@@ -532,19 +534,51 @@ static int xenmem_add_to_physmap_once(
     domain_lock(d);
 
     /* Map at new location. */
-    rc = guest_physmap_add_page(d, xatp->gpfn, mfn, 0);
-    if ( 0 && xatp->space == XENMAPSPACE_gmfn_foreign )
-        dump_p2m_lookup(d, xatp->gpfn << PAGE_SHIFT);
+    rc = guest_physmap_add_page(d, gpfn, mfn, 0);
 
     domain_unlock(d);
 
     return rc;
 }
 
-static int xenmem_add_to_physmap(struct domain *d,
-                                 struct xen_add_to_physmap *xatp)
+static int xenmem_add_to_physmap_range(struct domain *d,
+                                       struct xen_add_to_physmap_range *xatpr)
 {
-    return xenmem_add_to_physmap_once(d, xatp);
+    int rc;
+
+    /* Process entries in reverse order to allow continuations */
+    while ( xatpr->size > 0 )
+    {
+        xen_ulong_t idx;
+        xen_pfn_t gpfn;
+
+        rc = copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1);
+        if ( rc < 0 )
+            goto out;
+
+        rc = copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1);
+        if ( rc < 0 )
+            goto out;
+
+        rc = xenmem_add_to_physmap_one(d, xatpr->space,
+                                       xatpr->foreign_domid,
+                                       idx, gpfn);
+
+        xatpr->size--;
+
+        /* Check for continuation if it's not the last iteration */
+        if ( xatpr->size > 0 && hypercall_preempt_check() )
+        {
+            rc = -EAGAIN;
+            goto out;
+        }
+    }
+
+    rc = 0;
+
+out:
+    return rc;
+
 }
 
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
@@ -565,13 +599,41 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
         if ( rc != 0 )
             return rc;
 
-        rc = xenmem_add_to_physmap(d, &xatp);
+        rc = xenmem_add_to_physmap_one(d, xatp.space,
+                                       xatp.foreign_domid,
+                                       xatp.idx, xatp.gpfn);
 
         rcu_unlock_domain(d);
 
         return rc;
     }
 
+    case XENMEM_add_to_physmap_range:
+    {
+        struct xen_add_to_physmap_range xatpr;
+        struct domain *d;
+
+        if ( copy_from_guest(&xatpr, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatpr.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = xenmem_add_to_physmap_range(d, &xatpr);
+
+        rcu_unlock_domain(d);
+
+        if ( rc && copy_to_guest(arg, &xatpr, 1) )
+            rc = -EFAULT;
+
+        if ( rc == -EAGAIN )
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "ih", op, arg);
+
+        return rc;
+    }
+
     default:
         return -ENOSYS;
     }
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index b2adfbe..1cc7a80 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -198,6 +198,15 @@ struct xen_machphys_mapping {
 typedef struct xen_machphys_mapping xen_machphys_mapping_t;
 DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
 
+/* Source mapping space. */
+/* ` enum phys_map_space { */
+#define XENMAPSPACE_shared_info  0 /* shared info page */
+#define XENMAPSPACE_grant_table  1 /* grant table page */
+#define XENMAPSPACE_gmfn         2 /* GMFN */
+#define XENMAPSPACE_gmfn_range   3 /* GMFN range */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom */
+/* ` } */
+
 /*
  * Sets the GPFN at which a particular page appears in the specified guest's
  * pseudophysical address space.
@@ -211,26 +220,40 @@ struct xen_add_to_physmap {
     /* Number of pages to go through for gmfn_range */
     uint16_t    size;
 
-    /* Source mapping space. */
-#define XENMAPSPACE_shared_info  0 /* shared info page */
-#define XENMAPSPACE_grant_table  1 /* grant table page */
-#define XENMAPSPACE_gmfn         2 /* GMFN */
-#define XENMAPSPACE_gmfn_range   3 /* GMFN range */
-#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another guest */
-    uint16_t space;
+    uint16_t space; /* => enum phys_map_space */
     domid_t foreign_domid; /* IFF gmfn_foreign */
 
 #define XENMAPIDX_grant_table_status 0x80000000
 
-    /* Index into source mapping space. */
+    /* Index into space being mapped. */
     xen_ulong_t idx;
 
-    /* GPFN where the source mapping page should appear. */
+    /* GPFN in domid where the source mapping page should appear. */
     xen_pfn_t     gpfn;
 };
 typedef struct xen_add_to_physmap xen_add_to_physmap_t;
 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
 
+/* A batched version of add_to_physmap. */
+#define XENMEM_add_to_physmap_range 23
+struct xen_add_to_physmap_range {
+    /* Which domain to change the mapping for. */
+    domid_t domid;
+    uint16_t space; /* => enum phys_map_space */
+
+    /* Number of pages to go through */
+    uint16_t size;
+    domid_t foreign_domid; /* IFF gmfn_foreign */
+
+    /* Indexes into space being mapped. */
+    XEN_GUEST_HANDLE(xen_ulong_t) idxs;
+
+    /* GPFN in domid where the source mapping page should appear. */
+    XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
+};
+typedef struct xen_add_to_physmap_range xen_add_to_physmap_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+
 /*
  * Unmaps the page appearing at a particular GPFN from the specified guest's
  * pseudophysical address space.
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index b2f6c50..9425520 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -51,6 +51,7 @@ DEFINE_XEN_GUEST_HANDLE(void);
 
 DEFINE_XEN_GUEST_HANDLE(uint64_t);
 DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
 #endif
 
 /*
