[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH RFC v2 3/7] libxc/vNUMA: vnodes allocation on NUMA nodes.



vNUMA node allocation on physical NUMA nodes.
If a vNUMA topology is defined and the guest runs on a hardware NUMA
machine, allocate each vnode's memory on the physical NUMA node given
by the vnode_to_pnode map.

Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>

---
Changes since v1:
* fixed incorrect output of number of pages per vnode for domain
in case of a failure.
* fixed returned error codes.

TODO:
* support vNUMA node memory allocation for domains built with superpages;
---
 tools/libxc/xc_dom.h     |    1 +
 tools/libxc/xc_dom_x86.c |   77 ++++++++++++++++++++++++++++++++++++++++------
 tools/libxc/xg_private.h |    1 +
 3 files changed, 70 insertions(+), 9 deletions(-)

diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 790f145..751357a 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -370,6 +370,7 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
 int arch_setup_meminit(struct xc_dom_image *dom);
 int arch_setup_bootearly(struct xc_dom_image *dom);
 int arch_setup_bootlate(struct xc_dom_image *dom);
+int arch_boot_numa_alloc(struct xc_dom_image *dom);
 
 /*
  * Local variables:
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 126c0f8..7a22f91 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -789,27 +789,47 @@ int arch_setup_meminit(struct xc_dom_image *dom)
     else
     {
         /* try to claim pages for early warning of insufficient memory avail */
+        rc = 0;
         if ( dom->claim_enabled ) {
             rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
                                        dom->total_pages);
             if ( rc )
+            {
+                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                             "%s: Failed to claim mem for dom\n",
+                             __FUNCTION__);
                 return rc;
+            }
         }
         /* setup initial p2m */
         for ( pfn = 0; pfn < dom->total_pages; pfn++ )
             dom->p2m_host[pfn] = pfn;
         
         /* allocate guest memory */
-        for ( i = rc = allocsz = 0;
-              (i < dom->total_pages) && !rc;
-              i += allocsz )
+        if (dom->nr_vnodes > 0)
         {
-            allocsz = dom->total_pages - i;
-            if ( allocsz > 1024*1024 )
-                allocsz = 1024*1024;
-            rc = xc_domain_populate_physmap_exact(
-                dom->xch, dom->guest_domid, allocsz,
-                0, 0, &dom->p2m_host[i]);
+            rc = arch_boot_numa_alloc(dom);
+            if ( rc )
+            {
+                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                             "%s: Failed to allocate memory on NUMA nodes\n",
+                             __FUNCTION__);
+                return rc;
+            }
+        }
+        else
+        {
+            for ( i = rc = allocsz = 0;
+                  (i < dom->total_pages) && !rc;
+                  i += allocsz )
+            {
+                allocsz = dom->total_pages - i;
+                if ( allocsz > 1024*1024 )
+                    allocsz = 1024*1024;
+                rc = xc_domain_populate_physmap_exact(
+                    dom->xch, dom->guest_domid, allocsz,
+                    0, 0, &dom->p2m_host[i]);
+            }
         }
 
         /* Ensure no unclaimed pages are left unused.
@@ -817,7 +837,46 @@ int arch_setup_meminit(struct xc_dom_image *dom)
         (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
                                     0 /* cancels the claim */);
     }
+    return rc;
+}
+
+int arch_boot_numa_alloc(struct xc_dom_image *dom)
+{ 
+    int rc, n;
+    unsigned long long guest_pages;
+    unsigned long allocsz, i, k;
+    unsigned long memflags;
 
+    rc = allocsz = k = 0;
+    for(n = 0; n < dom->nr_vnodes; n++)
+    {
+        memflags = 0;
+        if ( dom->vnode_to_pnode[n] != NUMA_NO_NODE )
+        {
+            memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[n]);
+            memflags |= XENMEMF_exact_node_request;
+        }
+        guest_pages = dom->vmemsizes[n] >> PAGE_SHIFT_X86;
+        for ( i = 0;
+            (i < guest_pages) && !rc;
+                i += allocsz )
+        {
+            allocsz = guest_pages - i;
+            if ( allocsz > 1024*1024 )
+                allocsz = 1024*1024;
+                rc = xc_domain_populate_physmap_exact(
+                                    dom->xch, dom->guest_domid, allocsz,
+                                    0, memflags, &dom->p2m_host[i + k]);
+            k += allocsz;
+        }
+        if ( rc )
+        {
+            xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                    "%s: Failed allocation of %Lu pages for vnode %d on pnode %d out of %lu\n",
+                    __FUNCTION__, guest_pages, n, dom->vnode_to_pnode[n], dom->total_pages);
+            return rc;
+        }
+    }
     return rc;
 }
 
diff --git a/tools/libxc/xg_private.h b/tools/libxc/xg_private.h
index db02ccf..a8d8e19 100644
--- a/tools/libxc/xg_private.h
+++ b/tools/libxc/xg_private.h
@@ -127,6 +127,7 @@ typedef uint64_t l4_pgentry_64_t;
 #define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
 #define NRPAGES(x) (ROUNDUP(x, PAGE_SHIFT) >> PAGE_SHIFT)
 
+#define NUMA_NO_NODE ~((uint32_t)0)
 
 /* XXX SMH: following skanky macros rely on variable p2m_size being set */
 /* XXX TJD: also, "guest_width" should be the guest's sizeof(unsigned long) */
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.