
[Xen-devel] [PATCH 15 of 36] x86: simplify vmalloc_sync_all



vmalloc_sync_all() is only called from register_die_notifier and
alloc_vm_area.  Neither is on a performance-critical path, so
vmalloc_sync_all() itself is never on a hot path.

Given that the optimisations in vmalloc_sync_all add a fair amount of
code and complexity, and are fairly hard to evaluate for correctness,
it's better to simply remove them and keep the code simple rather than
worry about the function's absolute performance.
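
For reference, the pattern being removed looks roughly like this (a
simplified sketch of the 32-bit side, not the exact code; "fully_synced"
stands in for the real check that the whole pgd_list was walked without
vmalloc_sync_one() failing): a static bitmap remembers which pgd entries
have already been synced, and a cached "start" is advanced past the
leading run of synced entries so later calls can skip them.

	static DECLARE_BITMAP(insync, PTRS_PER_PGD);	/* pgd slots already synced */
	static unsigned long start = TASK_SIZE;		/* first possibly-unsynced slot */
	unsigned long address;

	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			/* ... walk pgd_list and sync this pgd slot ... */
			if (fully_synced)
				set_bit(pgd_index(address), insync);
		}
		/* remember a fully-synced leading run across calls */
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}

The simplified version below just rescans the whole range under pgd_lock
on every call, which is cheap enough given how rarely it runs.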

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
---
 arch/x86/mm/fault.c |   73 ++++++++++++++++-----------------------------------
 1 file changed, 24 insertions(+), 49 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -949,14 +949,7 @@
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-       /*
-        * Note that races in the updates of insync and start aren't
-        * problematic: insync can only get set bits added, and updates to
-        * start are only improving performance (without affecting correctness
-        * if undone).
-        */
-       static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-       static unsigned long start = TASK_SIZE;
+       unsigned long start = VMALLOC_START & PGDIR_MASK;
        unsigned long address;
 
        if (SHARED_KERNEL_PMD)
@@ -964,56 +957,38 @@
 
        BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
        for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-               if (!test_bit(pgd_index(address), insync)) {
-                       unsigned long flags;
-                       struct page *page;
+               unsigned long flags;
+               struct page *page;
 
-                       spin_lock_irqsave(&pgd_lock, flags);
-                       list_for_each_entry(page, &pgd_list, lru) {
-                               if (!vmalloc_sync_one(page_address(page),
-                                                     address))
-                                       break;
-                       }
-                       spin_unlock_irqrestore(&pgd_lock, flags);
-                       if (!page)
-                               set_bit(pgd_index(address), insync);
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       if (!vmalloc_sync_one(page_address(page),
+                                             address))
+                               break;
                }
-               if (address == start && test_bit(pgd_index(address), insync))
-                       start = address + PGDIR_SIZE;
+               spin_unlock_irqrestore(&pgd_lock, flags);
        }
 #else /* CONFIG_X86_64 */
-       /*
-        * Note that races in the updates of insync and start aren't
-        * problematic: insync can only get set bits added, and updates to
-        * start are only improving performance (without affecting correctness
-        * if undone).
-        */
-       static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-       static unsigned long start = VMALLOC_START & PGDIR_MASK;
+       unsigned long start = VMALLOC_START & PGDIR_MASK;
        unsigned long address;
 
        for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-               if (!test_bit(pgd_index(address), insync)) {
-                       const pgd_t *pgd_ref = pgd_offset_k(address);
-                       unsigned long flags;
-                       struct page *page;
+               const pgd_t *pgd_ref = pgd_offset_k(address);
+               unsigned long flags;
+               struct page *page;
 
-                       if (pgd_none(*pgd_ref))
-                               continue;
-                       spin_lock_irqsave(&pgd_lock, flags);
-                       list_for_each_entry(page, &pgd_list, lru) {
-                               pgd_t *pgd;
-                               pgd = (pgd_t *)page_address(page) + pgd_index(address);
-                               if (pgd_none(*pgd))
-                                       set_pgd(pgd, *pgd_ref);
-                               else
-                                       BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-                       }
-                       spin_unlock_irqrestore(&pgd_lock, flags);
-                       set_bit(pgd_index(address), insync);
+               if (pgd_none(*pgd_ref))
+                       continue;
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       if (pgd_none(*pgd))
+                               set_pgd(pgd, *pgd_ref);
+                       else
+                               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                }
-               if (address == start)
-                       start = address + PGDIR_SIZE;
+               spin_unlock_irqrestore(&pgd_lock, flags);
        }
 #endif
 }


