[Xen-changelog] [xen-unstable] x86: Avoid deep recursion when destroying a domain and reaping pagetables.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1210670214 -3600
# Node ID 7211d37de25b6073f0346bc8ff3f66e735952859
# Parent  22f589f09da549314761cfd2ce8207c438afc617
x86: Avoid deep recursion when destroying a domain and reaping pagetables.

From: Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/domain.c        |   26 ++++++++++++++++++++++++++
 xen/arch/x86/mm.c            |   10 ++++++++++
 xen/include/asm-x86/config.h |    8 ++++++++
 3 files changed, 44 insertions(+)

diff -r 22f589f09da5 -r 7211d37de25b xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue May 13 09:58:18 2008 +0100
+++ b/xen/arch/x86/domain.c     Tue May 13 10:16:54 2008 +0100
@@ -1725,6 +1725,27 @@ static int relinquish_memory(
         if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
             put_page(page);
 
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+        /*
+         * Forcibly drop reference counts of page tables above top most (which
+         * were skipped to prevent long latencies due to deep recursion - see
+         * the special treatment in free_lX_table()).
+         */
+        y = page->u.inuse.type_info;
+        if ( (type < PGT_root_page_table) &&
+             unlikely(((y + PGT_type_mask) &
+                       (PGT_type_mask|PGT_validated)) == type) )
+        {
+            BUG_ON((y & PGT_count_mask) >=
+                   (page->count_info & PGC_count_mask));
+            while ( y & PGT_count_mask )
+            {
+                put_page_and_type(page);
+                y = page->u.inuse.type_info;
+            }
+        }
+#endif
+
         /*
          * Forcibly invalidate top-most, still valid page tables at this point
          * to break circular 'linear page table' references. This is okay
@@ -1896,6 +1917,11 @@ int domain_relinquish_resources(struct d
         /* fallthrough */
 
     case RELMEM_done:
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+        ret = relinquish_memory(d, &d->page_list, PGT_l1_page_table);
+        if ( ret )
+            return ret;
+#endif
         break;
 
     default:
diff -r 22f589f09da5 -r 7211d37de25b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue May 13 09:58:18 2008 +0100
+++ b/xen/arch/x86/mm.c Tue May 13 10:16:54 2008 +0100
@@ -1320,6 +1320,11 @@ static void free_l3_table(struct page_in
     l3_pgentry_t *pl3e;
     int           i;
 
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+    if ( d->arch.relmem == RELMEM_dom_l3 )
+        return;
+#endif
+
     pl3e = map_domain_page(pfn);
 
     for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
@@ -1342,6 +1347,11 @@ static void free_l4_table(struct page_in
     unsigned long pfn = page_to_mfn(page);
     l4_pgentry_t *pl4e = page_to_virt(page);
     int           i;
+
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+    if ( d->arch.relmem == RELMEM_dom_l4 )
+        return;
+#endif
 
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l4_slot(d, i) )
diff -r 22f589f09da5 -r 7211d37de25b xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h      Tue May 13 09:58:18 2008 +0100
+++ b/xen/include/asm-x86/config.h      Tue May 13 10:16:54 2008 +0100
@@ -40,6 +40,14 @@
 
 #define CONFIG_HOTPLUG 1
 #define CONFIG_HOTPLUG_CPU 1
+
+/*
+ * Avoid deep recursion when tearing down pagetables during domain destruction,
+ * causing dom0 to become unresponsive and Xen to miss time-critical softirq
+ * deadlines. This will ultimately be replaced by built-in preemptibility of
+ * get_page_type().
+ */
+#define DOMAIN_DESTRUCT_AVOID_RECURSION 1
 
 #define HZ 100
 
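In outline, the patch works in two halves: free_l3_table() and free_l4_table()
now return early while d->arch.relmem indicates the corresponding top-level
tables are being reaped, so the usual child-first recursion into lower-level
tables is skipped; the type references those skipped walks would otherwise
have dropped are later dropped by an extra relinquish_memory(d, &d->page_list,
PGT_l1_page_table) pass, which simply calls put_page_and_type() on each page
until its type count reaches zero. The standalone C sketch below models that
flat, iterative reference-dropping pattern on a toy reference-counted tree; it
is illustrative only, and none of its types, fields or function names are the
hypervisor's.

/*
 * Toy model of flat, non-recursive teardown (not Xen code; all names here
 * are illustrative).  Parents hold a "type" and a "general" reference on
 * each child, loosely mirroring get_page_and_type().  Instead of freeing
 * the tree by child-first recursion, teardown walks a flat list of all
 * pages and drops whatever references remain, one put at a time.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define FANOUT 4                      /* small fan-out instead of 512 entries */

struct pt_page {
    int count;                        /* general refcount (cf. PGC_count_mask) */
    int type_count;                   /* type refcount (cf. PGT_count_mask)    */
    struct pt_page *child[FANOUT];
    struct pt_page *next;             /* linkage on the flat per-domain list   */
};

static struct pt_page *page_list;     /* cf. d->page_list */

static struct pt_page *alloc_table(void)
{
    struct pt_page *p = calloc(1, sizeof(*p));
    p->count = 1;                     /* the "allocated" reference */
    p->next = page_list;
    page_list = p;
    return p;
}

/* Building the tree recurses, but only a few levels deep. */
static struct pt_page *build_tree(int levels)
{
    struct pt_page *p = alloc_table();
    if (levels > 1)
        for (int i = 0; i < FANOUT; i++) {
            p->child[i] = build_tree(levels - 1);
            p->child[i]->count++;         /* parent takes a general ref ...   */
            p->child[i]->type_count++;    /* ... and a type ref on the child  */
        }
    return p;
}

/* Flat, iterative teardown: no recursion over the tree structure at all. */
static void relinquish_memory(void)
{
    struct pt_page *p, *next;

    for (p = page_list; p; p = p->next) {
        p->count--;                       /* drop the "allocated" reference    */
        while (p->type_count > 0) {       /* forcibly drop leftover type refs, */
            p->type_count--;              /* cf. put_page_and_type() in a loop */
            p->count--;
        }
    }

    for (p = page_list; p; p = next) {    /* every count is now zero: free all */
        next = p->next;
        assert(p->count == 0);
        free(p);
    }
    page_list = NULL;
}

int main(void)
{
    build_tree(4);                        /* four levels, as on x86-64 */
    relinquish_memory();
    printf("tree torn down without recursing over page tables\n");
    return 0;
}

The attraction of the flat pass is that it does a small, bounded amount of
work per page taken off a list, rather than freeing an entire subtree in one
uninterruptible call chain from the root. As the new comment in config.h
notes, this is a stopgap until page type get/put becomes preemptible in its
own right.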

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog