
[Xen-changelog] Fix TLB flushing on page type changes for SMP guests.



ChangeSet 1.1364, 2005/03/24 21:44:30+00:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Fix TLB flushing on page type changes for SMP guests.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 arch/x86/mm.c              |   15 +++++++++------
 common/page_alloc.c        |   35 +++++++++++------------------------
 include/asm-x86/flushtlb.h |   20 ++++++++++++++++++++
 3 files changed, 40 insertions(+), 30 deletions(-)
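
In brief: the old page-type-change path in mm.c consulted only
exec_domain[0]->processor, so on an SMP guest the CPUs running the
domain's other virtual CPUs were never flushed and could retain stale
TLB entries.  The fix builds a cpuset covering every exec_domain of the
owning domain, filters it through the new tlbflush_filter_cpuset()
helper (which drops any CPU whose per-CPU flush timestamp proves it has
flushed since the page's last use), and IPIs only the remainder via
flush_tlb_mask().  alloc_domheap_pages() is reworked to use the same
helper, accumulating one mask across the whole allocation.

Below is a standalone, simplified sketch of the timestamp-filter idea
(not Xen source: NR_CPUS, need_flush() and the sample timestamps are
illustrative, and the wrap-around guard of the real NEED_FLUSH() is
omitted):

    /* Standalone sketch -- not Xen source.  The real NEED_FLUSH() also
     * guards against wrap of the tlbflush clock. */
    #include <stdio.h>

    #define NR_CPUS 4

    /* Per-CPU timestamp of each CPU's most recent TLB flush. */
    static unsigned int tlbflush_time[NR_CPUS];

    /* A CPU still needs a flush if it last flushed at or before the
     * page's last-use timestamp (the core condition of NEED_FLUSH()). */
    static int need_flush(unsigned int cpu_stamp, unsigned int page_stamp)
    {
        return cpu_stamp <= page_stamp;
    }

    /* Analogue of tlbflush_filter_cpuset(): drop from @cpuset every CPU
     * whose TLB has provably been flushed since @page_stamp. */
    static unsigned long filter_cpuset(unsigned long cpuset,
                                       unsigned int page_stamp)
    {
        int i;
        for ( i = 0; i < NR_CPUS; i++ )
            if ( (cpuset & (1UL << i)) &&
                 !need_flush(tlbflush_time[i], page_stamp) )
                cpuset &= ~(1UL << i);
        return cpuset;
    }

    int main(void)
    {
        unsigned long mask;

        tlbflush_time[0] = 5;  /* flushed after the page's last use */
        tlbflush_time[1] = 2;  /* flushed before it -- may be stale */
        tlbflush_time[2] = 9;
        tlbflush_time[3] = 1;

        /* Page last used at time 4; all four CPUs are candidates. */
        mask = filter_cpuset(0xfUL, 4);
        printf("CPUs still to flush: %#lx\n", mask);  /* prints 0xa */
        return 0;
    }
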


diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-03-24 17:02:49 -05:00
+++ b/xen/arch/x86/mm.c 2005-03-24 17:02:49 -05:00
@@ -1147,13 +1147,16 @@
                  * may be unnecessary (e.g., page was GDT/LDT) but those
                  * circumstances should be very rare.
                  */
-                struct domain *d = page_get_owner(page);
-                if ( unlikely(NEED_FLUSH(tlbflush_time[d->exec_domain[0]->
-                                                      processor],
-                                         page->tlbflush_timestamp)) )
+                struct exec_domain *ed;
+                unsigned long mask = 0;
+                for_each_exec_domain ( page_get_owner(page), ed )
+                    mask |= 1 << ed->processor;
+                mask = tlbflush_filter_cpuset(mask, page->tlbflush_timestamp);
+
+                if ( unlikely(mask != 0) )
                 {
-                    perfc_incr(need_flush_tlb_flush);
-                    flush_tlb_cpu(d->exec_domain[0]->processor);
+                    perfc_incrc(need_flush_tlb_flush);
+                    flush_tlb_mask(mask);
                 }
 
                 /* We lose existing type, back pointer, and validity. */
diff -Nru a/xen/common/page_alloc.c b/xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   2005-03-24 17:02:49 -05:00
+++ b/xen/common/page_alloc.c   2005-03-24 17:02:49 -05:00
@@ -470,43 +470,30 @@
 struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
 {
     struct pfn_info *pg;
-    unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
-    int i, j;
+    unsigned long mask = 0;
+    int i;
 
     ASSERT(!in_irq());
 
     if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
         return NULL;
 
-    flushed_mask = 0;
     for ( i = 0; i < (1 << order); i++ )
     {
-        if ( (mask = (pg[i].u.free.cpu_mask & ~flushed_mask)) != 0 )
-        {
-            pfn_stamp = pg[i].tlbflush_timestamp;
-            for ( j = 0; (mask != 0) && (j < smp_num_cpus); j++ )
-            {
-                if ( mask & (1UL<<j) )
-                {
-                    cpu_stamp = tlbflush_time[j];
-                    if ( !NEED_FLUSH(cpu_stamp, pfn_stamp) )
-                        mask &= ~(1UL<<j);
-                }
-            }
-            
-            if ( unlikely(mask != 0) )
-            {
-                flush_tlb_mask(mask);
-                perfc_incrc(need_flush_tlb_flush);
-                flushed_mask |= mask;
-            }
-        }
+        mask |= tlbflush_filter_cpuset(
+            pg[i].u.free.cpu_mask & ~mask, pg[i].tlbflush_timestamp);
 
         pg[i].count_info        = 0;
         pg[i].u.inuse._domain   = 0;
         pg[i].u.inuse.type_info = 0;
     }
 
+    if ( unlikely(mask != 0) )
+    {
+        perfc_incrc(need_flush_tlb_flush);
+        flush_tlb_mask(mask);
+    }
+
     if ( d == NULL )
         return pg;
 
@@ -570,7 +557,7 @@
         /* NB. May recursively lock from domain_relinquish_memory(). */
         spin_lock_recursive(&d->page_alloc_lock);
 
-        for_each_exec_domain(d, ed)
+        for_each_exec_domain ( d, ed )
             cpu_mask |= 1 << ed->processor;
 
         for ( i = 0; i < (1 << order); i++ )
diff -Nru a/xen/include/asm-x86/flushtlb.h b/xen/include/asm-x86/flushtlb.h
--- a/xen/include/asm-x86/flushtlb.h    2005-03-24 17:02:49 -05:00
+++ b/xen/include/asm-x86/flushtlb.h    2005-03-24 17:02:49 -05:00
@@ -43,6 +43,26 @@
              (lastuse_stamp <= curr_time)));
 }
 
+/*
+ * Filter the given set of CPUs, returning only those that may not have
+ * flushed their TLBs since @page_timestamp.
+ */
+static inline unsigned long tlbflush_filter_cpuset(
+    unsigned long cpuset, u32 page_timestamp)
+{
+    int i;
+    unsigned long remain;
+
+    for ( i = 0, remain = ~0UL; (cpuset & remain) != 0; i++, remain <<= 1 )
+    {
+        if ( (cpuset & (1UL << i)) &&
+             !NEED_FLUSH(tlbflush_time[i], page_timestamp) )
+            cpuset &= ~(1UL << i);
+    }
+
+    return cpuset;
+}
+
 extern void new_tlbflush_clock_period(void);
 
 /* Read pagetable base. */

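A note on the page_alloc.c change: the old loop could issue up to one
flush_tlb_mask() per page in the order, whereas the new code folds every
page's filtered cpuset into a single mask and flushes at most once for
the whole allocation.  The "& ~mask" term plays the role of the old
flushed_mask: a CPU already known to need a flush is not re-tested for
later pages in the batch.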
