
[Xen-devel] [PATCH] linux/x86-64: initialization code cleanup



Since init_memory_mapping() installs the pgd entry only after populating all
lower-level tables, the whole mechanism can be coded to avoid using
xen_l?_entry_update() on pages that aren't page tables (yet): until a freshly
allocated table page is hooked into the live hierarchy and made read-only, it
is ordinary memory and can be written with plain stores. Also, __set_pte() is
a pure duplicate of set_pte() and hence can go away.
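
To illustrate the intended ordering, here is a minimal sketch (the wrapper
function sketch_populate_pte_page() is hypothetical and not part of the
patch; alloc_static_page(), early_make_page_readonly(), set_pte() and
set_pmd() are the helpers the patch itself uses):

	/*
	 * Hypothetical illustration only: populate a new pte page with
	 * plain stores while it is still ordinary RAM, and go through
	 * the hypervisor-aware path only when hooking it into a live pmd.
	 */
	static void __meminit sketch_populate_pte_page(pmd_t *live_pmd,
						       unsigned long paddr)
	{
		unsigned long pte_phys;
		pte_t *pte = alloc_static_page(&pte_phys);
		int k;

		/* Not yet hooked up, not yet read-only: plain writes
		   suffice, no xen_l1_entry_update() needed. */
		for (k = 0; k < PTRS_PER_PTE; k++, paddr += PAGE_SIZE)
			set_pte(pte + k, __pte(paddr | _KERNPG_TABLE));

		/* Only now does the page become a page table: make it
		   read-only first, then install it via the one setter
		   that must go through the hypervisor. */
		early_make_page_readonly(pte, XENFEAT_writable_page_tables);
		set_pmd(live_pmd, __pmd(pte_phys | _KERNPG_TABLE));
	}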

As usual, this was written and tested on 2.6.25.2 and then made to apply to
the 2.6.18 tree without further testing.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: head-2008-05-08/arch/x86_64/mm/init-xen.c
===================================================================
--- head-2008-05-08.orig/arch/x86_64/mm/init-xen.c	2008-05-08 15:02:32.000000000 +0200
+++ head-2008-05-08/arch/x86_64/mm/init-xen.c	2008-05-15 10:42:17.000000000 +0200
@@ -60,6 +60,8 @@ unsigned int __kernel_page_user;
 EXPORT_SYMBOL(__kernel_page_user);
 #endif
 
+int after_bootmem;
+
 extern unsigned long *contiguous_bitmap;
 
 static unsigned long dma_reserve __initdata;
@@ -99,6 +101,8 @@ void __meminit early_make_page_readonly(
        pte_t pte, *ptep;
        unsigned long *page = (unsigned long *) init_level4_pgt;
 
+       BUG_ON(after_bootmem);
+
        if (xen_feature(feature))
                return;
 
@@ -236,7 +240,6 @@ void show_mem(void)
        printk(KERN_INFO "%lu pages swap cached\n",cached);
 }
 
-int after_bootmem;
 
 static __init void *spp_getpage(void)
 { 
@@ -440,11 +443,6 @@ static __meminit void *alloc_static_page
 
 #define PTE_SIZE PAGE_SIZE
 
-static inline void __set_pte(pte_t *dst, pte_t val)
-{
-       *dst = val;
-}
-
 static inline int make_readonly(unsigned long paddr)
 {
        extern char __vsyscall_0;
@@ -539,12 +537,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned 
                unsigned long pte_phys;
                pte_t *pte, *pte_save;
 
-               if (address >= end) {
-                       if (!after_bootmem)
-                               for (; i < PTRS_PER_PMD; i++, pmd++)
-                                       set_pmd(pmd, __pmd(0));
+               if (address >= end)
                        break;
-               }
                pte = alloc_static_page(&pte_phys);
                pte_save = pte;
                for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
@@ -557,17 +550,21 @@ phys_pmd_init(pmd_t *pmd_page, unsigned 
                for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
                        unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
 
-                       if ((address >= end) ||
-                           ((address >> PAGE_SHIFT) >=
-                            xen_start_info->nr_pages))
+                       if (address >= (after_bootmem
+                                       ? end
+                                       : xen_start_info->nr_pages << PAGE_SHIFT))
                                pteval = 0;
                        else if (make_readonly(address))
                                pteval &= ~_PAGE_RW;
-                       __set_pte(pte, __pte(pteval & __supported_pte_mask));
+                       set_pte(pte, __pte(pteval & __supported_pte_mask));
+               }
+               if (!after_bootmem) {
+                       early_make_page_readonly(pte_save, XENFEAT_writable_page_tables);
+                       *pmd = __pmd(pte_phys | _KERNPG_TABLE);
+               } else {
+                       make_page_readonly(pte_save, XENFEAT_writable_page_tables);
+                       set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
                }
-               pte = pte_save;
-               early_make_page_readonly(pte, XENFEAT_writable_page_tables);
-               set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
        }
 }
 
@@ -600,11 +597,13 @@ phys_pud_init(pud_t *pud_page, unsigned 
                        break;
 
                pmd = alloc_static_page(&pmd_phys);
-               early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
+
                spin_lock(&init_mm.page_table_lock);
-               set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
+               *pud = __pud(pmd_phys | _KERNPG_TABLE);
                phys_pmd_init(pmd, paddr, end);
                spin_unlock(&init_mm.page_table_lock);
+
+               early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
        }
        __flush_tlb();
 } 
@@ -857,20 +855,18 @@ void init_memory_mapping(unsigned long
                pgd_t *pgd = pgd_offset_k(start);
                pud_t *pud;
 
-               if (after_bootmem) {
+               if (after_bootmem)
                        pud = pud_offset(pgd, start & PGDIR_MASK);
-                       make_page_readonly(pud, XENFEAT_writable_page_tables);
-                       pud_phys = __pa(pud);
-               } else {
+               else
                        pud = alloc_static_page(&pud_phys);
-                       early_make_page_readonly(pud, XENFEAT_writable_page_tables);
-               }
                next = start + PGDIR_SIZE;
                if (next > end) 
                        next = end; 
                phys_pud_init(pud, __pa(start), __pa(next));
-               if (!after_bootmem)
+               if (!after_bootmem) {
+                       early_make_page_readonly(pud, XENFEAT_writable_page_tables);
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+               }
        }
 
        if (!after_bootmem) {


