
[Xen-changelog] [xen-unstable] page_alloc: Check that neighbouring chunks belong to the same NUMA node before merging in free_heap_pages()



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1284393610 -3600
# Node ID 9c5f084135b8dfafc661b586b3230a7b0e94a538
# Parent  3985fea87987a5061573ab7f6b13cd885261830a
page_alloc: Check that neighbouring chunks belong to the same NUMA node
before merging in free_heap_pages(). This avoids more convoluted logic
in init_heap_pages().

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/common/page_alloc.c |   37 ++++++++++++-------------------------
 1 files changed, 12 insertions(+), 25 deletions(-)

diff -r 3985fea87987 -r 9c5f084135b8 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Fri Sep 10 19:06:33 2010 +0100
+++ b/xen/common/page_alloc.c   Mon Sep 13 17:00:10 2010 +0100
@@ -579,7 +579,8 @@ static void free_heap_pages(
             /* Merge with predecessor block? */
             if ( !mfn_valid(page_to_mfn(pg-mask)) ||
                  !page_state_is(pg-mask, free) ||
-                 (PFN_ORDER(pg-mask) != order) )
+                 (PFN_ORDER(pg-mask) != order) ||
+                 (phys_to_nid(page_to_maddr(pg-mask)) != node) )
                 break;
             pg -= mask;
             page_list_del(pg, &heap(node, zone, order));
@@ -589,15 +590,13 @@ static void free_heap_pages(
             /* Merge with successor block? */
             if ( !mfn_valid(page_to_mfn(pg+mask)) ||
                  !page_state_is(pg+mask, free) ||
-                 (PFN_ORDER(pg+mask) != order) )
+                 (PFN_ORDER(pg+mask) != order) ||
+                 (phys_to_nid(page_to_maddr(pg+mask)) != node) )
                 break;
             page_list_del(pg + mask, &heap(node, zone, order));
         }
 
         order++;
-
-        /* After merging, pg should remain in the same node. */
-        ASSERT(phys_to_nid(page_to_maddr(pg)) == node);
     }
 
     PFN_ORDER(pg) = order;
@@ -849,25 +848,22 @@ static void init_heap_pages(
 static void init_heap_pages(
     struct page_info *pg, unsigned long nr_pages)
 {
-    unsigned int nid_curr, nid_prev;
     unsigned long i;
 
-    nid_prev = phys_to_nid(page_to_maddr(pg-1));
-
-    for ( i = 0; i < nr_pages; nid_prev = nid_curr, i++ )
-    {
-        nid_curr = phys_to_nid(page_to_maddr(pg+i));
-
-        if ( unlikely(!avail[nid_curr]) )
+    for ( i = 0; i < nr_pages; i++ )
+    {
+        unsigned int nid = phys_to_nid(page_to_maddr(pg+i));
+
+        if ( unlikely(!avail[nid]) )
         {
             unsigned long s = page_to_mfn(pg + i);
             unsigned long e = page_to_mfn(pg + nr_pages - 1) + 1;
-            bool_t use_tail = (nid_curr == phys_to_nid(pfn_to_paddr(e - 1))) &&
+            bool_t use_tail = (nid == phys_to_nid(pfn_to_paddr(e - 1))) &&
                               !(s & ((1UL << MAX_ORDER) - 1)) &&
                               (find_first_set_bit(e) <= find_first_set_bit(s));
             unsigned long n;
 
-            n = init_node_heap(nid_curr, page_to_mfn(pg+i), nr_pages - i,
+            n = init_node_heap(nid, page_to_mfn(pg+i), nr_pages - i,
                                &use_tail);
             BUG_ON(i + n > nr_pages);
             if ( n && !use_tail )
@@ -880,16 +876,7 @@ static void init_heap_pages(
             nr_pages -= n;
         }
 
-        /*
-         * Free pages of the same node, or if they differ, but are on a
-         * MAX_ORDER alignment boundary (which already get reserved).
-         */
-        if ( (nid_curr == nid_prev) ||
-             !(page_to_mfn(pg+i) & ((1UL << MAX_ORDER) - 1)) )
-            free_heap_pages(pg+i, 0);
-        else
-            printk("Reserving non-aligned node boundary @ mfn %#lx\n",
-                   page_to_mfn(pg+i));
+        free_heap_pages(pg+i, 0);
     }
 }
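
For anyone skimming the change, here is a minimal, self-contained toy
model of the coalescing rule this patch tightens. It is not Xen code
(toy_page, toy_free(), TOY_MAX_ORDER and the 8/8 node split are all
invented for illustration), but it shows why the merge loop must compare
nodes: two physically contiguous free blocks of equal order can still
live on different NUMA nodes, and merging them would create a block that
spans nodes while sitting on a single per-node free list.

/*
 * Toy model (not Xen code) of the merge rule this patch adds to
 * free_heap_pages(). All names here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES      16
#define TOY_MAX_ORDER 4

struct toy_page {
    bool free;            /* is this frame the head of a free block? */
    unsigned int order;   /* block order; only valid while free      */
    unsigned int node;    /* NUMA node this frame belongs to         */
};

static struct toy_page pages[NR_PAGES];

/*
 * Free the 2^order block starting at pfn, coalescing with buddies as in
 * free_heap_pages(): stop if the buddy is out of range, in use, of a
 * different order, or (the check this patch adds) on another node.
 */
static void toy_free(unsigned long pfn, unsigned int order)
{
    unsigned int node = pages[pfn].node;

    while ( order < TOY_MAX_ORDER )
    {
        unsigned long mask  = 1UL << order;
        unsigned long buddy = pfn ^ mask;

        if ( (buddy >= NR_PAGES) ||            /* mfn_valid() analogue    */
             !pages[buddy].free ||             /* page_state_is(.., free) */
             (pages[buddy].order != order) ||  /* PFN_ORDER() check       */
             (pages[buddy].node != node) )     /* new phys_to_nid() check */
            break;

        pages[buddy].free = false;             /* consume the buddy head  */
        if ( buddy < pfn )
            pfn = buddy;                       /* merged block starts lower */
        order++;
    }

    pages[pfn].free  = true;
    pages[pfn].order = order;
    printf("free block: pfn %2lu order %u node %u\n", pfn, order, node);
}

int main(void)
{
    unsigned long i;

    /* Put an imaginary node boundary mid-array: 0-7 node 0, 8-15 node 1. */
    for ( i = 0; i < NR_PAGES; i++ )
        pages[i].node = (i < 8) ? 0 : 1;

    /* Frames 0-7 coalesce into a single order-3 block on node 0. */
    for ( i = 0; i < 8; i++ )
        toy_free(i, 0);

    /* Frames 8-15 coalesce up to order 3 on node 1; the node check then
     * stops the final merge with the order-3 block at pfn 0. */
    for ( i = 8; i < NR_PAGES; i++ )
        toy_free(i, 0);

    return 0;
}

Running it, frames 0-7 coalesce to an order-3 block on node 0 and frames
8-15 to an order-3 block on node 1; without the node comparison, the last
free would have produced a single order-4 block spanning both nodes. That
is exactly what lets the patch drop the nid_curr/nid_prev tracking and
the "Reserving non-aligned node boundary" branch from init_heap_pages():
once free_heap_pages() refuses to merge across a node boundary, the init
path can simply free every page unconditionally.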
 
