[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v1 4/9] mm: Scrub memory from idle loop



Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/arm/domain.c   |   13 ++++++++-----
 xen/arch/x86/domain.c   |    3 ++-
 xen/common/page_alloc.c |   41 +++++++++++++++++++++++++++++++++--------
 xen/include/xen/mm.h    |    1 +
 4 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index bb327da..fdf06e1 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -45,13 +45,16 @@ void idle_loop(void)
         if ( cpu_is_offline(smp_processor_id()) )
             stop_cpu();
 
-        local_irq_disable();
-        if ( cpu_is_haltable(smp_processor_id()) )
+        if ( !scrub_free_pages() )
         {
-            dsb(sy);
-            wfi();
+            local_irq_disable();
+            if ( cpu_is_haltable(smp_processor_id()) )
+            {
+                dsb(sy);
+                wfi();
+            }
+            local_irq_enable();
         }
-        local_irq_enable();
 
         do_tasklet();
         do_softirq();
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 90e2b1f..a5f62b5 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -118,7 +118,8 @@ static void idle_loop(void)
     {
         if ( cpu_is_offline(smp_processor_id()) )
             play_dead();
-        (*pm_idle)();
+        if ( !scrub_free_pages() )
+            (*pm_idle)();
         do_tasklet();
         do_softirq();
         /*
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 61f873a..e438547 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1044,16 +1044,35 @@ static void merge_chunks(struct page_info *pg, unsigned int node,
         page_list_add(pg, &heap(node, zone, order));
 }
 
-static void scrub_free_pages(unsigned int node)
+bool_t scrub_free_pages()
 {
     struct page_info *pg;
     unsigned int i, zone;
-    int order;
+    int order, cpu = smp_processor_id();
+    nodeid_t node = cpu_to_node(cpu), local_node;
+    static nodemask_t node_scrubbing;
 
-    ASSERT(spin_is_locked(&heap_lock));
+    if ( node == NUMA_NO_NODE )
+        node = 0;
+    local_node = node;
 
-    if ( !node_need_scrub[node] )
-        return;
+    while ( 1 )
+    {
+        if ( node_need_scrub[node] && !node_test_and_set(node, node_scrubbing) )
+            break;
+
+        /*
+         * If local node is already being scrubbed then see if there is a
+         * memory-only node that needs scrubbing.
+         */
+        do {
+            node = cycle_node(node, node_online_map);
+            if ( node == local_node )
+                return 0;
+        } while ( !cpumask_empty(&node_to_cpumask(node)) );
+    }
+
+    spin_lock(&heap_lock);
 
     for ( zone = 0; zone < NR_ZONES; zone++ )
     {
@@ -1067,7 +1086,11 @@ static void scrub_free_pages(unsigned int node)
                     break;
 
                 for ( i = 0; i < (1UL << order); i++)
+                {
                     scrub_one_page(&pg[i]);
+                    if ( softirq_pending(cpu) )
+                        goto out;
+                }
 
                 pg->count_info &= ~PGC_need_scrub;
 
@@ -1078,6 +1101,11 @@ static void scrub_free_pages(unsigned int node)
             }
         }
     }
+
+ out:
+    spin_unlock(&heap_lock);
+    node_clear(node, node_scrubbing);
+    return (node_need_scrub[node] != 0);
 }
 
 /* Free 2^@order set of pages. */
@@ -1142,9 +1170,6 @@ static void free_heap_pages(
     if ( tainted )
         reserve_offlined_page(pg);
 
-    if ( need_scrub )
-        scrub_free_pages(node);
-
     spin_unlock(&heap_lock);
 }
 
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 88de3c1..3d93fcc 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -138,6 +138,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe);
 void xenheap_max_mfn(unsigned long mfn);
 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);
 void free_xenheap_pages(void *v, unsigned int order);
+bool_t scrub_free_pages(void);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
 #define free_xenheap_page(v) (free_xenheap_pages(v,0))
 /* Map machine page range in Xen virtual address space. */
-- 
1.7.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.