
[Xen-changelog] [xen-unstable] Remove cpumask for page_info struct.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1233761391 0
# Node ID de853e901b5c6cfe3687404623c750b0f2c66058
# Parent  13a0272c8c024fca83bc991c7e2da992d07bc8eb
Remove cpumask for page_info struct.

This makes TLB flushing on page allocation more conservative, but the
flush clock should still save us most of the time (page freeing and
alloc'ing tends to happen in batches, and not necessarily close
together). We could add some optimisations to the flush filter if this
does turn out to be a significant overhead for some (useful)
workloads.
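
For context, the flush clock works roughly as follows: a global counter
ticks on every TLB flush, each CPU records the counter value of its own
most recent flush, and a freed page is stamped with the counter value at
free time. tlbflush_filter() can then drop from a candidate mask any CPU
that has flushed since the page was freed. The sketch below illustrates
the idea in simplified, self-contained C; the names (note_tlb_flush,
need_flush, tlbflush_filter_sketch), the flat boolean mask, and the
omission of clock wrap-around handling are all simplifications for
illustration, not the actual Xen definitions.

#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS 8

static uint32_t tlbflush_clock = 1;        /* global "time" of TLB flushes */
static uint32_t tlbflush_time[NR_CPUS];    /* per-CPU time of last flush   */

/* Call on 'cpu' whenever it flushes its TLB. */
static void note_tlb_flush(unsigned int cpu)
{
    tlbflush_time[cpu] = ++tlbflush_clock; /* wrap-around handling omitted */
}

/* Stamp for a page at free time; 0 is reserved for "no flush needed". */
static uint32_t current_flush_time(void)
{
    return tlbflush_clock;
}

/*
 * Would 'cpu' still need a flush before a page freed at 'page_stamp' can
 * safely be reused?  Only if it has not flushed since the page was freed.
 */
static bool need_flush(unsigned int cpu, uint32_t page_stamp)
{
    return (page_stamp != 0) && (tlbflush_time[cpu] <= page_stamp);
}

/* Drop from 'mask' every CPU that has flushed since the page was freed. */
static void tlbflush_filter_sketch(bool mask[NR_CPUS], uint32_t page_stamp)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask[cpu] && !need_flush(cpu, page_stamp))
            mask[cpu] = false;
}

With the per-page cpumask gone, the allocator below starts from
cpu_online_map rather than a recorded mask, and a filter of this shape is
what keeps the extra flush IPIs rare in practice.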

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/common/page_alloc.c   |   15 ++++-----------
 xen/include/asm-ia64/mm.h |    9 ---------
 xen/include/asm-x86/mm.h  |    6 ------
 3 files changed, 4 insertions(+), 26 deletions(-)

diff -r 13a0272c8c02 -r de853e901b5c xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Wed Feb 04 15:08:46 2009 +0000
+++ b/xen/common/page_alloc.c   Wed Feb 04 15:29:51 2009 +0000
@@ -400,7 +400,7 @@ static struct page_info *alloc_heap_page
         BUG_ON(pg[i].count_info != 0);
 
         /* Add in any extra CPUs that need flushing because of this page. */
-        cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
+        cpus_andnot(extra_cpus_mask, cpu_online_map, mask);
         tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
         cpus_or(mask, mask, extra_cpus_mask);
 
@@ -425,7 +425,6 @@ static void free_heap_pages(
     unsigned long mask;
     unsigned int i, node = phys_to_nid(page_to_maddr(pg));
     unsigned int zone = page_to_zone(pg);
-    struct domain *d;
 
     ASSERT(order <= MAX_ORDER);
     ASSERT(node >= 0);
@@ -446,15 +445,9 @@ static void free_heap_pages(
          */
         pg[i].count_info = 0;
 
-        if ( (d = page_get_owner(&pg[i])) != NULL )
-        {
-            pg[i].tlbflush_timestamp = tlbflush_current_time();
-            pg[i].u.free.cpumask     = d->domain_dirty_cpumask;
-        }
-        else
-        {
-            cpus_clear(pg[i].u.free.cpumask);
-        }
+        /* If a page has no owner it will need no safety TLB flush. */
+        pg[i].tlbflush_timestamp =
+            page_get_owner(&pg[i]) ? tlbflush_current_time() : 0;
     }
 
     spin_lock(&heap_lock);
diff -r 13a0272c8c02 -r de853e901b5c xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Wed Feb 04 15:08:46 2009 +0000
+++ b/xen/include/asm-ia64/mm.h Wed Feb 04 15:29:51 2009 +0000
@@ -62,21 +62,12 @@ struct page_info
         struct {
             /* Order-size of the free chunk this page is the head of. */
             u32 order;
-            /* Mask of possibly-tainted TLBs. */
-            cpumask_t cpumask;
         } free;
 
     } u;
 
     /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
     u32 tlbflush_timestamp;
-
-#if 0
-// following added for Linux compiling
-    page_flags_t flags;
-    atomic_t _count;
-    struct list_head lru;      // is this the same as above "list"?
-#endif
 };
 
 #define set_page_count(p,v)    atomic_set(&(p)->_count, v - 1)
diff -r 13a0272c8c02 -r de853e901b5c xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Feb 04 15:08:46 2009 +0000
+++ b/xen/include/asm-x86/mm.h  Wed Feb 04 15:29:51 2009 +0000
@@ -65,12 +65,6 @@ struct page_info
             unsigned long pinned:1; /* Is the shadow pinned? */
             unsigned long count:26; /* Reference count */
         } sh;
-
-        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
-        struct {
-            /* Mask of possibly-tainted TLBs. */
-            cpumask_t cpumask;
-        } free;
 
     } u;
 
