
[Xen-changelog] [xen-unstable] Do not use bitmap allocator after boot time.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1247068078 -3600
# Node ID ef38784f9f85ba8db24af84c345b33da08970b13
# Parent  721c14d7f60b45cd0a4ff5c57e05d2a345f180b7
Do not use bitmap allocator after boot time.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/common/page_alloc.c   |  130 +++++++++++++++++++++++-----------------------
 xen/include/asm-ia64/mm.h |   20 ++-----
 xen/include/asm-x86/mm.h  |   23 +++-----
 3 files changed, 83 insertions(+), 90 deletions(-)
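
The patch drops the boot-time bitmap (map_alloc/map_free/allocated_in_map) as the
record of which pages are allocated, and instead packs a two-bit, mutually-exclusive
state field into each page's count_info. Below is a minimal, self-contained sketch of
that encoding and the page_state_is() test; PG_SHIFT/PG_MASK and the 64-bit layout are
simplified stand-ins for illustration, not Xen's real PG_shift/PG_mask definitions
(which are not shown in this patch).

/* Toy model of the 2-bit page-state field carried in count_info. */
#include <assert.h>
#include <stdio.h>

#define PG_SHIFT(idx)   (64 - (idx))                /* toy: assume 64-bit count_info */
#define PG_MASK(x, idx) ((unsigned long)(x) << PG_SHIFT(idx))

#define PGC_state           PG_MASK(3, 9)  /* two bits => four mutually-exclusive states */
#define PGC_state_inuse     PG_MASK(0, 9)
#define PGC_state_offlining PG_MASK(1, 9)
#define PGC_state_offlined  PG_MASK(2, 9)
#define PGC_state_free      PG_MASK(3, 9)

struct page_info { unsigned long count_info; };

#define page_state_is(pg, st) (((pg)->count_info & PGC_state) == PGC_state_##st)

int main(void)
{
    struct page_info pg = { .count_info = PGC_state_free };

    assert(page_state_is(&pg, free));

    /* alloc_heap_pages(): allocation moves a free page to "inuse". */
    pg.count_info = (pg.count_info & ~PGC_state) | PGC_state_inuse;
    assert(page_state_is(&pg, inuse));

    /* free_heap_pages(): back to "free" ("offlined" if it was "offlining"). */
    pg.count_info = (pg.count_info & ~PGC_state) | PGC_state_free;
    printf("state bits now %#lx (free)\n", pg.count_info & PGC_state);
    return 0;
}
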

diff -r 721c14d7f60b -r ef38784f9f85 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Wed Jul 08 14:22:00 2009 +0100
+++ b/xen/common/page_alloc.c   Wed Jul 08 16:47:58 2009 +0100
@@ -389,8 +389,7 @@ static struct page_info *alloc_heap_page
         page_list_add_tail(pg, &heap(node, zone, j));
         pg += 1 << j;
     }
-    
-    map_alloc(page_to_mfn(pg), request);
+
     ASSERT(avail[node][zone] >= request);
     avail[node][zone] -= request;
 
@@ -401,7 +400,8 @@ static struct page_info *alloc_heap_page
     for ( i = 0; i < (1 << order); i++ )
     {
         /* Reference count must continuously be zero for free pages. */
-        BUG_ON(pg[i].count_info != 0);
+        BUG_ON(pg[i].count_info != PGC_state_free);
+        pg[i].count_info = PGC_state_inuse;
 
         if ( pg[i].u.free.need_tlbflush )
         {
@@ -444,7 +444,7 @@ static int reserve_offlined_page(struct 
         struct page_info *pg;
         int next_order;
 
-        if ( test_bit(_PGC_offlined, &cur_head->count_info) )
+        if ( page_state_is(cur_head, offlined) )
         {
             cur_head++;
             continue;
@@ -462,7 +462,7 @@ static int reserve_offlined_page(struct 
             for ( i = (1 << cur_order), pg = cur_head + (1 << cur_order );
                   i < (1 << next_order);
                   i++, pg++ )
-                if ( test_bit(_PGC_offlined, &pg->count_info) )
+                if ( page_state_is(pg, offlined) )
                     break;
             if ( i == ( 1 << next_order) )
             {
@@ -483,12 +483,10 @@ static int reserve_offlined_page(struct 
 
     for ( cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++ )
     {
-        if ( !test_bit(_PGC_offlined, &cur_head->count_info) )
+        if ( !page_state_is(cur_head, offlined) )
             continue;
 
         avail[node][zone]--;
-
-        map_alloc(page_to_mfn(cur_head), 1);
 
         page_list_add_tail(cur_head,
                            test_bit(_PGC_broken, &cur_head->count_info) ?
@@ -525,14 +523,13 @@ static void free_heap_pages(
          *     in its pseudophysical address space).
          * In all the above cases there can be no guest mappings of this page.
          */
-        ASSERT(!(pg[i].count_info & PGC_offlined));
-        pg[i].count_info &= PGC_offlining | PGC_broken;
-        if ( pg[i].count_info & PGC_offlining )
-        {
-            pg[i].count_info &= ~PGC_offlining;
-            pg[i].count_info |= PGC_offlined;
+        ASSERT(!page_state_is(&pg[i], offlined));
+        pg[i].count_info =
+            ((pg[i].count_info & PGC_broken) |
+             (page_state_is(&pg[i], offlining)
+              ? PGC_state_offlined : PGC_state_free));
+        if ( page_state_is(&pg[i], offlined) )
             tainted = 1;
-        }
 
         /* If a page has no owner it will need no safety TLB flush. */
         pg[i].u.free.need_tlbflush = (page_get_owner(&pg[i]) != NULL);
@@ -542,7 +539,6 @@ static void free_heap_pages(
 
     spin_lock(&heap_lock);
 
-    map_free(page_to_mfn(pg), 1 << order);
     avail[node][zone] += 1 << order;
 
     /* Merge chunks as far as possible. */
@@ -553,7 +549,7 @@ static void free_heap_pages(
         if ( (page_to_mfn(pg) & mask) )
         {
             /* Merge with predecessor block? */
-            if ( allocated_in_map(page_to_mfn(pg)-mask) ||
+            if ( !page_state_is(pg-mask, free) ||
                  (PFN_ORDER(pg-mask) != order) )
                 break;
             pg -= mask;
@@ -562,7 +558,7 @@ static void free_heap_pages(
         else
         {
             /* Merge with successor block? */
-            if ( allocated_in_map(page_to_mfn(pg)+mask) ||
+            if ( !page_state_is(pg+mask, free) ||
                  (PFN_ORDER(pg+mask) != order) )
                 break;
             page_list_del(pg + mask, &heap(node, zone, order));
@@ -593,7 +589,6 @@ static void free_heap_pages(
  * Once a page is broken, it can't be assigned anymore
  * A page will be offlined only if it is free
  * return original count_info
- *
  */
 static unsigned long mark_page_offline(struct page_info *pg, int broken)
 {
@@ -605,26 +600,19 @@ static unsigned long mark_page_offline(s
     do {
         nx = x = y;
 
-        if ( ((x & PGC_offlined_broken) == PGC_offlined_broken) )
-            return y;
-
-        if ( x & PGC_offlined )
-        {
-            /* PGC_offlined means it is a free page. */
-            if ( broken && !(nx & PGC_broken) )
-                nx |= PGC_broken;
-            else
-                return y;
-        }
-        else
-        {
-            /* It is not offlined, not reserved page */
-            nx |= (allocated_in_map(page_to_mfn(pg)) ?
-                   PGC_offlining : PGC_offlined);
+        if ( ((x & PGC_state) != PGC_state_offlined) &&
+             ((x & PGC_state) != PGC_state_offlining) )
+        {
+            nx &= ~PGC_state;
+            nx |= (((x & PGC_state) == PGC_state_free)
+                   ? PGC_state_offlined : PGC_state_offlining);
         }
 
         if ( broken )
             nx |= PGC_broken;
+
+        if ( x == nx )
+            break;
     } while ( (y = cmpxchg(&pg->count_info, x, nx)) != x );
 
     return y;
@@ -698,13 +686,13 @@ int offline_page(unsigned long mfn, int 
 
     old_info = mark_page_offline(pg, broken);
 
-    if ( !allocated_in_map(mfn) )
+    if ( page_state_is(pg, free) )
     {
         /* Free pages are reserve directly */
         reserve_heap_page(pg);
         *status = PG_OFFLINE_OFFLINED;
     }
-    else if ( test_bit(_PGC_offlined, &pg->count_info) )
+    else if ( page_state_is(pg, offlined) )
     {
         *status = PG_OFFLINE_OFFLINED;
     }
@@ -749,8 +737,9 @@ int offline_page(unsigned long mfn, int 
  */
 unsigned int online_page(unsigned long mfn, uint32_t *status)
 {
+    unsigned long x, nx, y;
     struct page_info *pg;
-    int ret = 0, free = 0;
+    int ret;
 
     if ( mfn > max_page )
     {
@@ -760,30 +749,40 @@ unsigned int online_page(unsigned long m
 
     pg = mfn_to_page(mfn);
 
-    *status = 0;
-
     spin_lock(&heap_lock);
 
-    if ( unlikely(is_page_broken(pg)) )
-    {
-        ret = -EINVAL;
-        *status = PG_ONLINE_FAILED |PG_ONLINE_BROKEN;
-    }
-    else if ( pg->count_info & PGC_offlined )
-    {
-        clear_bit(_PGC_offlined, &pg->count_info);
-        page_list_del(pg, &page_offlined_list);
-        *status = PG_ONLINE_ONLINED;
-        free = 1;
-    }
-    else if ( pg->count_info & PGC_offlining )
-    {
-        clear_bit(_PGC_offlining, &pg->count_info);
-        *status = PG_ONLINE_ONLINED;
-    }
+    y = pg->count_info;
+    do {
+        ret = *status = 0;
+
+        if ( y & PGC_broken )
+        {
+            ret = -EINVAL;
+            *status = PG_ONLINE_FAILED |PG_ONLINE_BROKEN;
+            break;
+        }
+
+        if ( (y & PGC_state) == PGC_state_offlined )
+        {
+            page_list_del(pg, &page_offlined_list);
+            *status = PG_ONLINE_ONLINED;
+        }
+        else if ( (y & PGC_state) == PGC_state_offlining )
+        {
+            *status = PG_ONLINE_ONLINED;
+        }
+        else
+        {
+            break;
+        }
+
+        x = y;
+        nx = (x & ~PGC_state) | PGC_state_inuse;
+    } while ( (y = cmpxchg(&pg->count_info, x, nx)) != x );
+
     spin_unlock(&heap_lock);
 
-    if ( free )
+    if ( (y & PGC_state) == PGC_state_offlined )
         free_heap_pages(pg, 0);
 
     return ret;
@@ -804,11 +803,11 @@ int query_page_offline(unsigned long mfn
 
     pg = mfn_to_page(mfn);
 
-    if (pg->count_info & PGC_offlining)
+    if ( page_state_is(pg, offlining) )
         *status |= PG_OFFLINE_STATUS_OFFLINE_PENDING;
-    if (pg->count_info & PGC_broken)
+    if ( pg->count_info & PGC_broken )
         *status |= PG_OFFLINE_STATUS_BROKEN;
-    if (pg->count_info & PGC_offlined)
+    if ( page_state_is(pg, offlined) )
         *status |= PG_OFFLINE_STATUS_OFFLINED;
 
     spin_unlock(&heap_lock);
@@ -934,6 +933,7 @@ void __init scrub_heap_pages(void)
 void __init scrub_heap_pages(void)
 {
     unsigned long mfn;
+    struct page_info *pg;
 
     if ( !opt_bootscrub )
         return;
@@ -944,8 +944,10 @@ void __init scrub_heap_pages(void)
     {
         process_pending_timers();
 
+        pg = mfn_to_page(mfn);
+
         /* Quick lock-free check. */
-        if ( allocated_in_map(mfn) )
+        if ( !page_state_is(pg, free) )
             continue;
 
         /* Every 100MB, print a progress dot. */
@@ -955,8 +957,8 @@ void __init scrub_heap_pages(void)
         spin_lock(&heap_lock);
 
         /* Re-check page status with lock held. */
-        if ( !allocated_in_map(mfn) )
-            scrub_one_page(mfn_to_page(mfn));
+        if ( page_state_is(pg, free) )
+            scrub_one_page(pg);
 
         spin_unlock(&heap_lock);
     }
diff -r 721c14d7f60b -r ef38784f9f85 xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Jul 08 14:22:00 2009 +0100
+++ b/xen/include/asm-ia64/mm.h Wed Jul 08 16:47:58 2009 +0100
@@ -135,18 +135,14 @@ struct page_info
  /* Page is broken? */
 #define _PGC_broken       PG_shift(7)
 #define PGC_broken        PG_mask(1, 7)
- /* Page is offline pending ? */
-#define _PGC_offlining    PG_shift(8)
-#define PGC_offlining     PG_mask(1, 8)
- /* Page is offlined */
-#define _PGC_offlined     PG_shift(9)
-#define PGC_offlined      PG_mask(1, 9)
-#define PGC_offlined_broken (PGC_offlined | PGC_broken)
-
-#define is_page_offlining(page) ((page)->count_info & PGC_offlining)
-#define is_page_offlined(page)  ((page)->count_info & PGC_offlined)
-#define is_page_broken(page)    ((page)->count_info & PGC_broken)
-#define is_page_online(page)    (!is_page_offlined(page))
+
+ /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
+#define PGC_state         PG_mask(3, 9)
+#define PGC_state_inuse   PG_mask(0, 9)
+#define PGC_state_offlining PG_mask(1, 9)
+#define PGC_state_offlined PG_mask(2, 9)
+#define PGC_state_free    PG_mask(3, 9)
+#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
 
  /* Count of references to this frame. */
 #define PGC_count_width   PG_shift(9)
diff -r 721c14d7f60b -r ef38784f9f85 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Jul 08 14:22:00 2009 +0100
+++ b/xen/include/asm-x86/mm.h  Wed Jul 08 16:47:58 2009 +0100
@@ -199,24 +199,19 @@ struct page_info
 #define PGC_cacheattr_base PG_shift(6)
 #define PGC_cacheattr_mask PG_mask(7, 6)
  /* Page is broken? */
-#define _PGC_broken         PG_shift(7)
-#define PGC_broken          PG_mask(1, 7)
- /* Page is offline pending ? */
-#define _PGC_offlining      PG_shift(8)
-#define PGC_offlining       PG_mask(1, 8)
- /* Page is offlined */
-#define _PGC_offlined       PG_shift(9)
-#define PGC_offlined        PG_mask(1, 9)
-#define PGC_offlined_broken (PGC_offlined | PGC_broken)
+#define _PGC_broken       PG_shift(7)
+#define PGC_broken        PG_mask(1, 7)
+ /* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
+#define PGC_state         PG_mask(3, 9)
+#define PGC_state_inuse   PG_mask(0, 9)
+#define PGC_state_offlining PG_mask(1, 9)
+#define PGC_state_offlined PG_mask(2, 9)
+#define PGC_state_free    PG_mask(3, 9)
+#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
 
  /* Count of references to this frame. */
 #define PGC_count_width   PG_shift(9)
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
-
-#define is_page_offlining(page)  ((page)->count_info & PGC_offlining)
-#define is_page_offlined(page)   ((page)->count_info & PGC_offlined)
-#define is_page_broken(page)     ((page)->count_info & PGC_broken)
-#define is_page_online(page)     (!is_page_offlined(page))
 
 #if defined(__i386__)
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
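
Note that online_page() now updates count_info with the same read-modify-cmpxchg retry
loop that mark_page_offline() uses, instead of clearing individual PGC_offlined /
PGC_offlining bits, so the state field and PGC_broken are examined and changed together
atomically. A rough stand-alone illustration of that pattern, mirroring the
mark_page_offline() hunk above; __sync_val_compare_and_swap stands in for Xen's
cmpxchg(), and the PGC_* values are simplified 64-bit stand-ins:

#include <stdio.h>

#define PGC_state           (3UL << 55)  /* toy stand-ins, not the real layout */
#define PGC_state_inuse     (0UL << 55)
#define PGC_state_offlining (1UL << 55)
#define PGC_state_offlined  (2UL << 55)
#define PGC_state_free      (3UL << 55)
#define PGC_broken          (1UL << 57)

struct page_info { unsigned long count_info; };

/* Move a page towards "offlined": free pages go straight to offlined,
 * in-use pages only to offlining; optionally mark the page broken too. */
static unsigned long toy_mark_page_offline(struct page_info *pg, int broken)
{
    unsigned long x, nx, y = pg->count_info;

    do {
        nx = x = y;

        if ( ((x & PGC_state) != PGC_state_offlined) &&
             ((x & PGC_state) != PGC_state_offlining) )
        {
            nx &= ~PGC_state;
            nx |= ((x & PGC_state) == PGC_state_free)
                  ? PGC_state_offlined : PGC_state_offlining;
        }

        if ( broken )
            nx |= PGC_broken;

        if ( x == nx )
            break;
    } while ( (y = __sync_val_compare_and_swap(&pg->count_info, x, nx)) != x );

    return y;  /* original count_info, as in the real function */
}

int main(void)
{
    struct page_info pg = { .count_info = PGC_state_free };

    toy_mark_page_offline(&pg, 0);
    printf("offlined? %d\n", (pg.count_info & PGC_state) == PGC_state_offlined);
    return 0;
}
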

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog