
[Xen-devel] [PATCH RFC 7/8] mm: Keep pages available for allocation while scrubbing



Instead of scrubbing pages while holding the heap lock, we can mark
the buddy's head as being scrubbed and drop the lock temporarily.
If someone (most likely alloc_heap_pages()) then tries to access
this chunk, it signals the scrubber to abort the scrub by setting
the head's PAGE_SCRUB_ABORT bit. The scrubber checks this bit after
processing each page and stops its work as soon as it sees it set.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
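For reviewers, a rough stand-alone model of the abort handshake described
above. This is illustrative only and not part of the patch: it reuses the
flag names, but the heap_lock serialisation, spin_lock_kick() and
cpu_relax() are replaced with C11 atomics and sched_yield(), and "scrubbing
a page" is just a counter increment.

/*
 * Minimal sketch of the PAGE_SCRUBBING / PAGE_SCRUB_ABORT handshake.
 * Build with: gcc -std=c11 -pthread handshake_model.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SCRUBBING      (1 << 1)
#define PAGE_SCRUB_ABORT    (1 << 2)

static atomic_uint scrub_state;     /* models head->u.free.scrub_state */
static int pages_scrubbed;

/* Scrubber: mark the buddy head, scrub page by page, and bail out as
 * soon as an abort has been requested. */
static void *scrubber(void *arg)
{
    atomic_store(&scrub_state, PAGE_SCRUBBING);

    for ( int i = 0; i < (1 << 8); i++ )
    {
        pages_scrubbed++;                        /* "scrub_one_page()" */
        if ( atomic_load(&scrub_state) & PAGE_SCRUB_ABORT )
            break;              /* someone wants this chunk: drop it */
    }

    atomic_store(&scrub_state, 0);  /* hand the chunk back, clears ABORT */
    return NULL;
}

/* Allocator side, mirroring check_and_stop_scrub(): request an abort and
 * wait until the scrubber acknowledges it by clearing scrub_state. */
static void stop_scrub(void)
{
    if ( atomic_load(&scrub_state) & PAGE_SCRUBBING )
    {
        atomic_fetch_or(&scrub_state, PAGE_SCRUB_ABORT);
        while ( atomic_load(&scrub_state) & PAGE_SCRUB_ABORT )
            sched_yield();                       /* stands in for cpu_relax() */
    }
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, scrubber, NULL);
    stop_scrub();
    pthread_join(t, NULL);

    printf("scrubbed %d page(s) before the abort was honoured\n",
           pages_scrubbed);
    return 0;
}

In the patch itself the ordering the model glosses over is provided by the
heap lock: scrub_free_pages() sets PAGE_SCRUBBING before dropping heap_lock,
and alloc_heap_pages() requests the abort while holding it.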
 xen/common/page_alloc.c  |   67 +++++++++++++++++++++++++++++++++++++++++----
 xen/include/asm-x86/mm.h |    4 +++
 2 files changed, 65 insertions(+), 6 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 3469185..a39afd4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -687,6 +687,18 @@ static void check_low_mem_virq(void)
     }
 }
 
+static void check_and_stop_scrub(struct page_info *head)
+{
+    if ( head->u.free.scrub_state & PAGE_SCRUBBING )
+    {
+        head->u.free.scrub_state |= PAGE_SCRUB_ABORT;
+        smp_mb();
+        spin_lock_kick();
+        while ( ACCESS_ONCE(head->u.free.scrub_state) & PAGE_SCRUB_ABORT )
+            cpu_relax();
+    }
+}
+
 /* Allocate 2^@order contiguous pages. */
 static struct page_info *alloc_heap_pages(
     unsigned int zone_lo, unsigned int zone_hi,
@@ -773,10 +785,15 @@ static struct page_info *alloc_heap_pages(
             {
                 if ( (pg = page_list_remove_head(&heap(node, zone, j))) )
                 {
-                    if ( (order == 0) || use_unscrubbed ||
-                         !test_bit(_PGC_need_scrub, &pg[0].count_info) )
+                    if ( !test_bit(_PGC_need_scrub, &pg[0].count_info) )
                         goto found;
 
+                    if ( (order == 0) || use_unscrubbed )
+                    {
+                        check_and_stop_scrub(pg);
+                        goto found;
+                    }
+
                     page_list_add_tail(pg, &heap(node, zone, j));
                 }
             }
@@ -911,6 +928,8 @@ static int reserve_offlined_page(struct page_info *head)
 
     cur_head = head;
 
+    check_and_stop_scrub(head);
+
     page_list_del(head, &heap(node, zone, head_order));
 
     while ( cur_head < (head + (1 << head_order)) )
@@ -990,6 +1009,9 @@ static bool_t can_merge(struct page_info *head, unsigned int node,
          !!test_bit(_PGC_need_scrub, &head->count_info) )
         return 0;
 
+    if ( head->u.free.scrub_state & PAGE_SCRUBBING )
+        return 0;
+
     return 1;
 }
 
@@ -1033,6 +1055,14 @@ static void merge_chunks(struct page_info *pg, unsigned int node,
 }
 
 #define SCRUB_CHUNK_ORDER  8
+
+static bool_t scrub_continue(void *data)
+{
+    struct page_info *head = (struct page_info *)data;
+
+    return !(ACCESS_ONCE(head->u.free.scrub_state) & PAGE_SCRUB_ABORT);
+}
+
 bool_t scrub_free_pages(unsigned int node)
 {
     struct page_info *pg;
@@ -1064,14 +1094,25 @@ bool_t scrub_free_pages(unsigned int node)
                 if ( !test_bit(_PGC_need_scrub, &pg[0].count_info) )
                     break;
 
-                page_list_del(pg, &heap(node, zone, order));
+                ASSERT(!pg[0].u.free.scrub_state);
+                pg[0].u.free.scrub_state = PAGE_SCRUBBING;
+
+                spin_unlock(&heap_lock);
 
                 scrub_order = (order > SCRUB_CHUNK_ORDER) ? SCRUB_CHUNK_ORDER : order;
                 num_scrubbed = 0;
-                while ( num_scrubbed < (1 << order) )
+                while ( scrub_continue(&pg[0]) && num_scrubbed < (1 << order) )
                 {
                     for ( i = 0; i < (1 << scrub_order); i++ )
+                    {
                         scrub_one_page(&pg[num_scrubbed + i]);
+                        if ( !scrub_continue(&pg[0]) )
+                        {
+                            /* Someone wants this chunk. Drop everything. */
+                            pg[0].u.free.scrub_state = 0;
+                            goto out_nolock;
+                        }
+                    }
 
                     num_scrubbed += (1 << scrub_order);
                     if ( softirq_pending(cpu) )
@@ -1080,7 +1121,15 @@ bool_t scrub_free_pages(unsigned int node)
                         break;
                     }
                  }
- 
+
+                if ( !spin_lock_cond(&heap_lock, scrub_continue, &pg[0]) )
+                {
+                    pg[0].u.free.scrub_state = 0;
+                    goto out_nolock;
+                }
+
+                page_list_del(pg, &heap(node, zone, order));
+
                 start = 0;
                 end = num_scrubbed;
 
@@ -1118,7 +1167,9 @@ bool_t scrub_free_pages(unsigned int node)
                     merge_chunks(ppg, node, zone, chunk_order, 1);
                     end += (1 << chunk_order);
                 }
- 
+
+                pg[0].u.free.scrub_state = 0;
+
                 if ( preempt )
                     goto out;
             }
@@ -1127,6 +1178,8 @@ bool_t scrub_free_pages(unsigned int node)
 
  out:
     spin_unlock(&heap_lock);
+
+ out_nolock:
     clear_bit(node, &node_scrubbing);
     return (node_need_scrub[node] != 0);
 }
@@ -1165,6 +1218,8 @@ static void free_heap_pages(
         if ( page_state_is(&pg[i], offlined) )
             tainted = 1;
 
+        pg[i].u.free.scrub_state = 0;
+
         /* If a page has no owner it will need no safety TLB flush. */
         pg[i].u.free.need_tlbflush = (page_get_owner(&pg[i]) != NULL);
         if ( pg[i].u.free.need_tlbflush )
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index b11124f..dd84835 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -87,6 +87,10 @@ struct page_info
 
         /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
         struct {
+#define PAGE_SCRUBBING      (1<<1)
+#define PAGE_SCRUB_ABORT    (1<<2)
+            unsigned char scrub_state;
+
             /* Do TLBs need flushing for safety before next page use? */
             bool_t need_tlbflush;
         } free;
-- 
1.7.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

