[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [linux-2.6.18-xen] xen/i386: Fix vmalloc_sync_all() for PAE.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1184403059 -3600
# Node ID 34ebf92ad28d53f70ca02966c9f926f7d83bafbb
# Parent  9debaf36090515b4ce54712c4641781bc263b1a6
xen/i386: Fix vmalloc_sync_all() for PAE.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 arch/i386/mm/fault-xen.c |   33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

diff -r 9debaf360905 -r 34ebf92ad28d arch/i386/mm/fault-xen.c
--- a/arch/i386/mm/fault-xen.c  Fri Jul 13 16:15:37 2007 +0100
+++ b/arch/i386/mm/fault-xen.c  Sat Jul 14 09:50:59 2007 +0100
@@ -739,18 +739,31 @@ void vmalloc_sync_all(void)
         * problematic: insync can only get set bits added, and updates to
         * start are only improving performance (without affecting correctness
         * if undone).
-        */
-       static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+        * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
+        *      This change works just fine with 2-level paging too.
+        */
+#define sync_index(a) ((a) >> PMD_SHIFT)
+       static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
        static unsigned long start = TASK_SIZE;
        unsigned long address;
 
        BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-       for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-               if (!test_bit(pgd_index(address), insync)) {
+       for (address = start;
+            address >= TASK_SIZE && address < hypervisor_virt_start;
+            address += 1UL << PMD_SHIFT) {
+               if (!test_bit(sync_index(address), insync)) {
                        unsigned long flags;
                        struct page *page;
 
                        spin_lock_irqsave(&pgd_lock, flags);
+                       /*
+                        * XEN: vmalloc_sync_one() failure path logic assumes
+                        * pgd_list is non-empty.
+                        */
+                       if (unlikely(!pgd_list)) {
+                               spin_unlock_irqrestore(&pgd_lock, flags);
+                               return;
+                       }
                        for (page = pgd_list; page; page =
                                        (struct page *)page->index)
                                if (!vmalloc_sync_one(page_address(page),
@@ -760,10 +773,10 @@ void vmalloc_sync_all(void)
                                }
                        spin_unlock_irqrestore(&pgd_lock, flags);
                        if (!page)
-                               set_bit(pgd_index(address), insync);
+                               set_bit(sync_index(address), insync);
                }
-               if (address == start && test_bit(pgd_index(address), insync))
-                       start = address + PGDIR_SIZE;
-       }
-}
-#endif
+               if (address == start && test_bit(sync_index(address), insync))
+                       start = address + (1UL << PMD_SHIFT);
+       }
+}
+#endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.