
[Xen-devel] [PATCH RESEND RFC 5/8] mm: Do not discard already-scrubbed pages if softirqs are pending

Currently we break out of the scrubbing loop as soon as a softirq
becomes pending, before PGC_need_scrub has been cleared on the pages
that were already scrubbed. That work is thrown away and redone on the
next pass. Instead, scrub in SCRUB_CHUNK_ORDER-sized batches and, when
preemption is needed, first return the clean portion to the heap as
properly aligned power-of-two chunks and merge the remaining dirty
pages back separately.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/common/page_alloc.c |   65 +++++++++++++++++++++++++++++++++++++++++-----
 1 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index ac15406..3469185 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1032,11 +1032,14 @@ static void merge_chunks(struct page_info *pg, unsigned int node,
         page_list_add(pg, &heap(node, zone, order));
 }
 
+#define SCRUB_CHUNK_ORDER  8
 bool_t scrub_free_pages(unsigned int node)
 {
     struct page_info *pg;
     unsigned int i, zone, cpu;
     int order;
+    unsigned int num_scrubbed, scrub_order, start, end;
+    bool_t preempt;
     static unsigned node_scrubbing;
 
     if ( !node_need_scrub[node] )
@@ -1046,6 +1049,7 @@ bool_t scrub_free_pages(unsigned int node)
         return 0;
 
     cpu = smp_processor_id();
+    preempt = 0;
 
     spin_lock(&heap_lock);
 
@@ -1060,16 +1064,63 @@ bool_t scrub_free_pages(unsigned int node)
                 if ( !test_bit(_PGC_need_scrub, &pg[0].count_info) )
                     break;
 
-                for ( i = 0; i < (1 << order); i++)
+                page_list_del(pg, &heap(node, zone, order));
+
+                scrub_order = (order > SCRUB_CHUNK_ORDER) ? SCRUB_CHUNK_ORDER : order;
+                num_scrubbed = 0;
+                while ( num_scrubbed < (1 << order) )
                 {
-                    scrub_one_page(&pg[i]);
+                    for ( i = 0; i < (1 << scrub_order); i++ )
+                        scrub_one_page(&pg[num_scrubbed + i]);
+
+                    num_scrubbed += (1 << scrub_order);
                     if ( softirq_pending(cpu) )
-                        goto out;
+                    {
+                        preempt = 1;
+                        break;
+                    }
+                }
+
+                start = 0;
+                end = num_scrubbed;
+
+                /* Merge clean pages */
+                while ( start < end )
+                {
+                    /*
+                     * Largest power-of-two chunk starting @start,
+                     * not greater than @end
+                     */
+                    unsigned int chunk_order = flsl(end - start) - 1;
+                    struct page_info *ppg = &pg[start];
+
+                    for ( i = 0; i < (1 << chunk_order); i++ )
+                        ppg[i].count_info &= ~PGC_need_scrub;
+
+                    node_need_scrub[node] -= (1 << chunk_order);
+
+                    PFN_ORDER(ppg) = chunk_order;
+                    merge_chunks(ppg, node, zone, chunk_order, 0);
+                    start += (1 << chunk_order);
+                }
+
+                /* Merge unscrubbed pages */
+                while ( end < (1 << order) )
+                {
+                    /*
+                     * Largest power-of-two chunk starting @end, not crossing
+                     * next power-of-two boundary
+                     */
+                    unsigned int chunk_order = ffsl(end) - 1;
+                    struct page_info *ppg = &pg[end];
+
+                    PFN_ORDER(ppg) = chunk_order;
+                    merge_chunks(ppg, node, zone, chunk_order, 1);
+                    end += (1 << chunk_order);
                 }
-
-                node_need_scrub[node] -= (1 << order);
-                for ( i = 0; i < (1 << order); i++)
-                    pg[i].count_info &= ~PGC_need_scrub;
+
+                if ( preempt )
+                    goto out;
             }
         }
     }
-- 
1.7.1
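
For readers following along outside the Xen tree, the change boils down
to two pieces. The first is a batch-and-poll loop: instead of checking
for pending softirqs after every single page (and throwing away all
progress on exit), scrubbing proceeds in 2^SCRUB_CHUNK_ORDER-page
batches and only polls between batches. A minimal sketch of that
pattern follows; it is not Xen code, and scrub_page() and
work_pending() are hypothetical stand-ins for scrub_one_page() and
softirq_pending(cpu).

#include <stdbool.h>
#include <stdio.h>

#define SCRUB_CHUNK_ORDER 8   /* poll after every 2^8 pages, as in the patch */

/* Hypothetical stand-ins for Xen's scrub_one_page()/softirq_pending(). */
static void scrub_page(unsigned int pfn) { (void)pfn; /* would zero the page */ }
static bool work_pending(void) { return false; }

/*
 * Scrub a 2^order page run in batches, polling for other work between
 * batches.  Returns the number of pages actually scrubbed so the caller
 * can hand the clean prefix back to the allocator (see the next sketch).
 */
static unsigned int scrub_batched(unsigned int first_pfn, unsigned int order)
{
    unsigned int scrub_order = (order > SCRUB_CHUNK_ORDER) ?
                               SCRUB_CHUNK_ORDER : order;
    unsigned int num_scrubbed = 0, i;

    while ( num_scrubbed < (1u << order) )
    {
        for ( i = 0; i < (1u << scrub_order); i++ )
            scrub_page(first_pfn + num_scrubbed + i);

        num_scrubbed += 1u << scrub_order;
        if ( work_pending() )              /* softirq_pending(cpu) upstream */
            break;                         /* keep the work done so far */
    }
    return num_scrubbed;
}

int main(void)
{
    printf("scrubbed %u pages\n", scrub_batched(0, 10));
    return 0;
}

With SCRUB_CHUNK_ORDER at 8, the loop scrubs 256 pages (1MB with 4KB
pages) between polls, which bounds softirq latency without paying the
polling cost on every page.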
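
The second piece is returning a partially-scrubbed run to the buddy
allocator, which only accepts naturally aligned power-of-two chunks.
The patch peels the clean prefix [0, num_scrubbed) from the left using
flsl() (largest power of two that still fits in what remains) and the
dirty tail using ffsl() (the alignment of the current offset caps the
chunk size). Below is a standalone demo of just that arithmetic, with
printf() standing in for merge_chunks(); the flsl()/ffsl() helpers are
local stand-ins for Xen's, assuming GCC/Clang builtins.

#include <stdio.h>

/* Stand-ins for Xen's flsl()/ffsl(): 1-based bit index, 0 for no bits. */
static int flsl(long x)
{
    return x ? (int)(8 * sizeof(long)) - __builtin_clzl((unsigned long)x) : 0;
}
static int ffsl(long x)
{
    return x ? __builtin_ctzl((unsigned long)x) + 1 : 0;
}

int main(void)
{
    unsigned int order = 4;          /* a 16-page buddy chunk */
    unsigned int num_scrubbed = 13;  /* pages scrubbed before preemption */
    unsigned int start = 0, end = num_scrubbed;

    /* Clean prefix: largest power-of-two chunk that fits in [start, end).
     * Peeling strictly decreasing powers of two keeps each chunk aligned. */
    while ( start < end )
    {
        unsigned int chunk_order = flsl(end - start) - 1;

        printf("clean: offset %2u, order %u\n", start, chunk_order);
        start += 1u << chunk_order;  /* merge_chunks(ppg, ..., 0) upstream */
    }

    /* Dirty tail: 'end' is at least one full batch here, and its lowest
     * set bit gives the largest aligned chunk starting at that offset. */
    while ( end < (1u << order) )
    {
        unsigned int chunk_order = ffsl(end) - 1;

        printf("dirty: offset %2u, order %u\n", end, chunk_order);
        end += 1u << chunk_order;    /* merge_chunks(ppg, ..., 1) upstream */
    }
    return 0;
}

For order 4 and 13 scrubbed pages this prints clean chunks of order 3,
2 and 0 followed by dirty chunks of order 0 and 1, i.e. 8+4+1 clean
pages and 1+2 dirty ones. The 0/1 passed as merge_chunks()'s last
argument presumably tells it whether the chunk still needs scrubbing,
so clean and dirty chunks never coalesce with each other.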

