
[Xen-changelog] [PATCH] [patch] pgtable.c cleanups



ChangeSet 1.1402, 2005/04/29 13:54:16+01:00, kraxel@xxxxxxxxxxx

        [PATCH] [patch] pgtable.c cleanups
        
          Hi guys,
        
        The new mm_(un)pin stuff is certainly a nice optimization.
        
        But can we pay a bit more attention to coding style and
        write code that isn't quite so hackish, please?  Casts, code
        duplication, "unsigned long" for page table entries *again*.
        A few more variables also make the code more readable
        (and gcc should be clever enough that this isn't a
        performance hit).  We want to have this merged into
        mainline some day, don't we?
        
        The patch below cleans up the pin/unpin code in pgtable.c.
        I've created two helper functions to factor out common code;
        the page table walk is basically the same for both pin and
        unpin, just with different page flags.  I've also fixed up the
        walk code to correctly walk through all four levels Linux has,
        so it works correctly for the PAE case.  For x86_64 it
        should work fine as well (untested though).
        
        please apply,
        
          Gerd
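
For readers skimming the diff, here is a condensed sketch of the structure
the patch introduces (identifiers are taken from the patch itself; the bodies
are abbreviated, so this is not a drop-in replacement):

    /* Condensed sketch of the refactoring in the diff below (abbreviated). */
    static void mm_walk(struct mm_struct *mm, pgprot_t flags)
    {
        /* Walk all four levels (pgd -> pud -> pmd -> pte) and apply
         * 'flags' to every page-table page that is not folded away,
         * via mm_walk_set_prot()/HYPERVISOR_update_va_mapping(). */
    }

    void mm_pin(struct mm_struct *mm)
    {
        mm_walk(mm, PAGE_KERNEL_RO);  /* page tables must be read-only before pinning */
        /* ... remap the pgd read-only, then xen_pgd_pin(__pa(mm->pgd)) ... */
    }

    void mm_unpin(struct mm_struct *mm)
    {
        /* ... xen_pgd_unpin(__pa(mm->pgd)), then remap the pgd writable ... */
        mm_walk(mm, PAGE_KERNEL);     /* make the page tables writable again */
    }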



 pgtable.c |   77 ++++++++++++++++++++++++++++++++++++--------------------------
 1 files changed, 45 insertions(+), 32 deletions(-)


diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c        2005-04-29 09:03:19 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c        2005-04-29 09:03:19 -04:00
@@ -407,30 +407,58 @@
 }
 #endif /* CONFIG_XEN_SHADOW_MODE */
 
-void mm_pin(struct mm_struct *mm)
+static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
 {
-    pgd_t       *pgd;
-    struct page *page;
-    int          i;
+       struct page *page = virt_to_page(pt);
+       unsigned long pfn = page_to_pfn(page);
 
-    spin_lock(&mm->page_table_lock);
+       if (PageHighMem(page))
+               return;
+       HYPERVISOR_update_va_mapping(
+               (unsigned long)__va(pfn << PAGE_SHIFT),
+               pfn_pte(pfn, flags), 0);
+}
 
-    for ( i = 0, pgd = mm->pgd; i < USER_PTRS_PER_PGD; i++, pgd++ )
-    {
-        if ( *(unsigned long *)pgd == 0 )
-            continue;
-        page = pmd_page(*(pmd_t *)pgd);
-        if ( !PageHighMem(page) )
-            HYPERVISOR_update_va_mapping(
-                (unsigned long)__va(page_to_pfn(page)<<PAGE_SHIFT),
-                pfn_pte(page_to_pfn(page), PAGE_KERNEL_RO), 0);
-    }
+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
+{
+       pgd_t       *pgd;
+       pud_t       *pud;
+       pmd_t       *pmd;
+       pte_t       *pte;
+       int          g,u,m;
+
+       pgd = mm->pgd;
+       for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
+               if (pgd_none(*pgd))
+                       continue;
+               pud = pud_offset(pgd, 0);
+               if (PTRS_PER_PUD > 1) /* not folded */
+                       mm_walk_set_prot(pud,flags);
+               for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
+                       if (pud_none(*pud))
+                               continue;
+                       pmd = pmd_offset(pud, 0);
+                       if (PTRS_PER_PMD > 1) /* not folded */
+                               mm_walk_set_prot(pmd,flags);
+                       for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
+                               if (pmd_none(*pmd))
+                                       continue;
+                               pte = pte_offset_kernel(pmd,0);
+                               mm_walk_set_prot(pte,flags);
+                       }
+               }
+       }
+}
 
+void mm_pin(struct mm_struct *mm)
+{
+    spin_lock(&mm->page_table_lock);
+
+    mm_walk(mm, PAGE_KERNEL_RO);
     HYPERVISOR_update_va_mapping(
         (unsigned long)mm->pgd,
         pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO), 0);
     xen_pgd_pin(__pa(mm->pgd));
-
     mm->context.pinned = 1;
 
     spin_unlock(&mm->page_table_lock);
@@ -438,28 +466,13 @@
 
 void mm_unpin(struct mm_struct *mm)
 {
-    pgd_t       *pgd;
-    struct page *page;
-    int          i;
-
     spin_lock(&mm->page_table_lock);
 
     xen_pgd_unpin(__pa(mm->pgd));
     HYPERVISOR_update_va_mapping(
         (unsigned long)mm->pgd,
         pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0);
-
-    for ( i = 0, pgd = mm->pgd; i < USER_PTRS_PER_PGD; i++, pgd++ )
-    {
-        if ( *(unsigned long *)pgd == 0 )
-            continue;
-        page = pmd_page(*(pmd_t *)pgd);
-        if ( !PageHighMem(page) )
-            HYPERVISOR_update_va_mapping(
-                (unsigned long)__va(page_to_pfn(page)<<PAGE_SHIFT),
-                pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
-    }
-
+    mm_walk(mm, PAGE_KERNEL);
     mm->context.pinned = 0;
 
     spin_unlock(&mm->page_table_lock);
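
As a usage illustration (hypothetical -- not part of this changeset; the real
call sites live elsewhere in the tree): the mm->context.pinned flag that
mm_pin()/mm_unpin() maintain lets callers pin an address space lazily, for
example the first time it is switched to:

    /* Hypothetical caller sketch, for illustration only. */
    static void example_switch_to(struct mm_struct *next)
    {
        if (!next->context.pinned)
            mm_pin(next);   /* page tables go read-only and get pinned by Xen */
        /* ... the actual address-space switch would happen here ... */
    }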

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

