
[Xen-devel] [PATCH v2] tools/libxc: Batch memory allocations for PV guests



The current code for allocating memory for PV guests batches the
hypercalls, requesting 1024*1024 extents of order 0 at a time. To make
this faster, first try allocating extents of order 9 (2 MiB) and fall
back to order 0 allocations for anything the order 9 request does not
satisfy.

On my test machine this reduced the time to start a 128 GiB PV guest by
about 60 seconds.

Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
---

Changes in v2: Batched the hypercalls for order 9 allocations.
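
For illustration only, the allocation strategy is roughly the pattern
sketched below. populate_physmap() is a hypothetical stand-in for
xc_domain_populate_physmap(), which returns the number of extents it
actually populated, and the sizes are made up; this is a sketch of the
fallback logic, not the patch itself.

/* Illustration only -- not part of the patch.  Try one batched order 9
 * request first, then fill whatever was refused with order 0 pages. */
#include <stdio.h>

#define SUPERPAGE_PFN_SHIFT  9
#define SUPERPAGE_NR_PFNS    (1UL << SUPERPAGE_PFN_SHIFT)

/* Pretend hypercall: grants only half of the order 9 extents asked for. */
static long populate_physmap(unsigned long nr_extents, unsigned int order)
{
    return order == SUPERPAGE_PFN_SHIFT ? nr_extents / 2 : nr_extents;
}

int main(void)
{
    unsigned long pages = 1UL << 15;   /* 32768 order-0 pages == 128 MiB */
    unsigned long superpages = pages >> SUPERPAGE_PFN_SHIFT;
    unsigned long done;

    /* First pass: one batched request for all the order 9 extents. */
    done = populate_physmap(superpages, SUPERPAGE_PFN_SHIFT) * SUPERPAGE_NR_PFNS;

    /* Second pass: top up whatever remains with order 0 pages, as the
     * existing per-1024*1024-page loop in arch_setup_meminit() does. */
    if ( done < pages )
        done += populate_physmap(pages - done, 0);

    printf("allocated %lu of %lu pages\n", done, pages);
    return done == pages ? 0 : 1;
}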

 tools/libxc/xc_dom_x86.c | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 783f749..2d461e3 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -761,7 +761,7 @@ int arch_setup_meminit(struct xc_dom_image *dom)
 {
     int rc;
     xen_pfn_t pfn, allocsz, mfn, total, pfn_base;
-    int i, j;
+    int i, j, k;
 
     rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
     if ( rc )
@@ -869,6 +869,8 @@ int arch_setup_meminit(struct xc_dom_image *dom)
             unsigned int memflags;
             uint64_t pages;
             unsigned int pnode = dom->vnode_to_pnode[dom->vmemranges[i].nid];
+            int count = dom->total_pages >> SUPERPAGE_PFN_SHIFT;
+            xen_pfn_t extents[count];
 
             memflags = 0;
             if ( pnode != XC_NUMA_NO_NODE )
@@ -881,7 +883,26 @@ int arch_setup_meminit(struct xc_dom_image *dom)
             for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ )
                 dom->p2m_host[pfn] = pfn;
 
-            for ( j = 0; j < pages; j += allocsz )
+            for ( pfn = pfn_base, j = 0;
+                  pfn < pfn_base + pages;
+                  pfn += SUPERPAGE_NR_PFNS, j++ )
+                extents[j] = dom->p2m_host[pfn];
+            rc = xc_domain_populate_physmap(dom->xch, dom->guest_domid, count,
+                                            SUPERPAGE_PFN_SHIFT, memflags,
+                                            extents);
+            if ( rc < 0 )
+                return rc;
+
+            /* Expand the returned mfns into the p2m array. */
+            pfn = pfn_base;
+            for ( j = 0; j < rc; j++ )
+            {
+                mfn = extents[j];
+                for ( k = 0; k < SUPERPAGE_NR_PFNS; k++, pfn++ )
+                    dom->p2m_host[pfn] = mfn + k;
+            }
+
+            for ( j = rc * SUPERPAGE_NR_PFNS; j < pages; j += allocsz )
             {
                 allocsz = pages - j;
                 if ( allocsz > 1024*1024 )
@@ -904,6 +925,7 @@ int arch_setup_meminit(struct xc_dom_image *dom)
                     return rc;
                 }
             }
+            rc = 0;
         }
 
         /* Ensure no unclaimed pages are left unused.
-- 
2.1.0

