
[Xen-devel] [PATCH RFC 4/8] mm: Scrub memory from idle loop



Instead of scrubbing pages at the time they are freed, do it from the
idle loop: scrub_free_pages() is now called by the idle vcpu and, when
it reports that the node still has dirty pages left, entering the
lower-power state is skipped for that iteration. The node_scrubbing
bitmask guarantees that only one CPU scrubs a given node at a time, and
the scrubber yields as soon as a softirq becomes pending on it. Note
that PGC_need_scrub is now cleared only after a whole chunk has been
scrubbed, so an interrupted pass simply re-scrubs the chunk later.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
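RFC note: the only serialization between scrubbing CPUs is an atomic
test-and-set of a per-node bit. Below is a minimal stand-alone sketch of
that claim/release pattern, using GCC's __atomic builtins in place of
Xen's test_and_set_bit()/clear_bit(); claim_node() and release_node()
are invented names for illustration only:

#include <stdbool.h>
#include <stdio.h>

static unsigned long node_scrubbing;     /* one "busy" bit per node */

/* Claim a node for scrubbing; false if another CPU already holds it. */
static bool claim_node(unsigned int node)
{
    unsigned long bit = 1UL << node;
    return !(__atomic_fetch_or(&node_scrubbing, bit, __ATOMIC_ACQUIRE) & bit);
}

/* Release the node so another idle CPU may pick it up. */
static void release_node(unsigned int node)
{
    __atomic_fetch_and(&node_scrubbing, ~(1UL << node), __ATOMIC_RELEASE);
}

int main(void)
{
    printf("first claim:   %d\n", claim_node(0));  /* 1: we own node 0 */
    printf("second claim:  %d\n", claim_node(0));  /* 0: already busy  */
    release_node(0);
    printf("after release: %d\n", claim_node(0));  /* 1: free again    */
    return 0;
}
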
 xen/arch/x86/domain.c   |    3 ++-
 xen/common/page_alloc.c |   29 ++++++++++++++++++++---------
 xen/include/xen/mm.h    |    1 +
 3 files changed, 23 insertions(+), 10 deletions(-)
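
The preemption added to scrub_free_pages() below follows the usual
pattern of polling for pending softirqs from inside the expensive inner
loop. A stand-alone sketch of the same shape, with softirq_pending()
stubbed out (in the hypervisor it checks the per-CPU pending bitmask)
and scrub_until_preempted() an invented name:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stand-in for Xen's softirq_pending(cpu); never fires in this sketch. */
static int softirq_pending_stub(void)
{
    return 0;
}

/*
 * Zero up to n pages, checking for preemption after each one. Returns
 * the count the caller may consider clean; a page scrubbed just before
 * the preemption check is redone on retry, mirroring the patch's
 * conservative handling of PGC_need_scrub.
 */
static size_t scrub_until_preempted(unsigned char (*pages)[PAGE_SIZE],
                                    size_t n)
{
    size_t i;

    for ( i = 0; i < n; i++ )
    {
        memset(pages[i], 0, PAGE_SIZE);
        if ( softirq_pending_stub() )
            break;       /* yield: leave the remaining pages dirty */
    }
    return i;
}

int main(void)
{
    static unsigned char pages[4][PAGE_SIZE];

    return scrub_until_preempted(pages, 4) == 4 ? 0 : 1;
}
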

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7d3071e..ce1d97f 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -117,7 +117,8 @@ static void idle_loop(void)
     {
         if ( cpu_is_offline(smp_processor_id()) )
             play_dead();
-        (*pm_idle)();
+        if ( !scrub_free_pages(cpu_to_node(smp_processor_id())) )
+            (*pm_idle)();
         do_tasklet();
         do_softirq();
         /*
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 6dbe13c..ac15406 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1032,16 +1032,22 @@ static void merge_chunks(struct page_info *pg, unsigned int node,
         page_list_add(pg, &heap(node, zone, order));
 }
 
-static void scrub_free_pages(unsigned int node)
+bool_t scrub_free_pages(unsigned int node)
 {
     struct page_info *pg;
-    unsigned int i, zone;
+    unsigned int i, zone, cpu;
     int order;
-
-    ASSERT(spin_is_locked(&heap_lock));
+    static unsigned long node_scrubbing;
 
     if ( !node_need_scrub[node] )
-        return;
+        return 0;
+
+    if ( test_and_set_bit(node, &node_scrubbing) )
+        return 0;
+
+    cpu = smp_processor_id();
+
+    spin_lock(&heap_lock);
 
     for ( zone = 0; zone < NR_ZONES; zone++ )
     {
@@ -1057,13 +1063,21 @@ static void scrub_free_pages(unsigned int node)
                 for ( i = 0; i < (1 << order); i++)
                 {
                     scrub_one_page(&pg[i]);
-                    pg[i].count_info &= ~PGC_need_scrub;
+                    if ( softirq_pending(cpu) )
+                        goto out;
                 }
 
                 node_need_scrub[node] -= (1 << order);
+                for ( i = 0; i < (1 << order); i++)
+                    pg[i].count_info &= ~PGC_need_scrub;
             }
         }
     }
+
+ out:
+    spin_unlock(&heap_lock);
+    clear_bit(node, &node_scrubbing);
+    return (node_need_scrub[node] != 0);
 }
 
 /* Free 2^@order set of pages. */
@@ -1130,9 +1144,6 @@ static void free_heap_pages(
     if ( tainted )
         reserve_offlined_page(pg);
 
-    if ( need_scrub )
-        scrub_free_pages(node);
-
     spin_unlock(&heap_lock);
 }
 
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 88de3c1..048d4bf 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -138,6 +138,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe);
 void xenheap_max_mfn(unsigned long mfn);
 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
 void free_xenheap_pages(void *v, unsigned int order);
+bool_t scrub_free_pages(unsigned int node);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
 /* Map machine page range in Xen virtual address space. */
-- 
1.7.1

