
[Xen-changelog] More PAE cleanups, this time for the page-type management.



ChangeSet 1.1690, 2005/06/07 15:02:39+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        More PAE cleanups, this time for the page-type management.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 arch/x86/mm.c                        |  193 ++++++++++++++++++-----------------
 include/asm-x86/x86_32/page-3level.h |   10 -
 2 files changed, 107 insertions(+), 96 deletions(-)
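
For context: the patch below replaces fixup_pae_vaddr() with l1_backptr(),
which recovers the virtual address an L1 table is mapped at from two pieces
of state: the L1's slot index within its L2, and the L3 slot number stashed
in the L2 page's type word (the "va backpointer"). A minimal standalone
sketch of that arithmetic -- the L2/L3 shifts are the real PAE values, but
the PGT_va_* bit layout here is an assumed placeholder, not Xen's actual
encoding:

    #include <stdio.h>

    #define L2_PAGETABLE_SHIFT  21                      /* PAE: 2MB per L2 entry */
    #define L3_PAGETABLE_SHIFT  30                      /* PAE: 1GB per L3 slot  */
    #define PGT_va_shift        27                      /* assumed bit position  */
    #define PGT_va_mask         (0x3UL << PGT_va_shift) /* PAE L3 slot is 0..3   */

    /* Rebuild the virtual address served by an L1, from its slot in the
     * L2 and the L3 slot recorded in the L2's type word. */
    static unsigned long backptr(unsigned long offset_in_l2,
                                 unsigned long l2_type)
    {
        unsigned long l3_slot = (l2_type & PGT_va_mask) >> PGT_va_shift;
        return (l3_slot << L3_PAGETABLE_SHIFT) |
               (offset_in_l2 << L2_PAGETABLE_SHIFT);
    }

    int main(void)
    {
        unsigned long l2_type = 2UL << PGT_va_shift;  /* L2 sits in L3 slot 2 */
        printf("va = %#lx\n", backptr(5, l2_type));   /* 2*1GB + 5*2MB        */
        return 0;
    }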


diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-06-07 11:02:50 -04:00
+++ b/xen/arch/x86/mm.c 2005-06-07 11:02:50 -04:00
@@ -685,53 +685,78 @@
 }
 
 #ifdef CONFIG_X86_PAE
-static inline int fixup_pae_linear_mappings(l3_pgentry_t *pl3e)
+static int create_pae_xen_mappings(l3_pgentry_t *pl3e)
 {
-    l2_pgentry_t *pl2e;
-    unsigned long vaddr;
-    int i,idx;
-
-    while ((unsigned long)pl3e & ~PAGE_MASK)
-        pl3e--;
-
-    if (!(l3e_get_flags(pl3e[3]) & _PAGE_PRESENT)) {
-        printk("Installing a L3 PAE pt without L2 in slot #3 isn't going to fly ...\n");
+    struct pfn_info *page;
+    l2_pgentry_t    *pl2e;
+    l3_pgentry_t     l3e3;
+    int              i;
+
+    pl3e = (l3_pgentry_t *)((unsigned long)pl3e & PAGE_MASK);
+
+    /* 3rd L3 slot contains L2 with Xen-private mappings. It *must* exist. */
+    l3e3 = pl3e[3];
+    if ( !(l3e_get_flags(l3e3) & _PAGE_PRESENT) )
+    {
+        MEM_LOG("PAE L3 3rd slot is empty");
         return 0;
     }
 
-    pl2e = map_domain_page(l3e_get_pfn(pl3e[3]));
-    for (i = 0; i < 4; i++) {
-        vaddr = LINEAR_PT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
-        idx = (vaddr >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
-        if (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) {
-            pl2e[idx] = l2e_from_paddr(l3e_get_paddr(pl3e[i]),
-                                       __PAGE_HYPERVISOR);
-        } else
-            pl2e[idx] = l2e_empty();
+    /*
+     * The Xen-private mappings include linear mappings. The L2 thus cannot
+     * be shared by multiple L3 tables. The test here is adequate because:
+     *  1. Cannot appear in slots != 3 because the page would then have an
+     *     unknown va backpointer, which get_page_type() explicitly disallows.
+     *  2. Cannot appear in another page table's L3:
+     *     a. alloc_l3_table() calls this function and this check will fail
+     *     b. mod_l3_entry() disallows updates to slot 3 in an existing table
+     */
+    page = l3e_get_page(l3e3);
+    BUG_ON(page->u.inuse.type_info & PGT_pinned);
+    BUG_ON((page->u.inuse.type_info & PGT_count_mask) == 0);
+    if ( (page->u.inuse.type_info & PGT_count_mask) != 1 )
+    {
+        MEM_LOG("PAE L3 3rd slot is shared");
+        return 0;
     }
+
+    /* Xen private mappings. */
+    pl2e = map_domain_page(l3e_get_pfn(l3e3));
+    memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
+           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
+           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
+    for ( i = 0; i < (PERDOMAIN_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
+        pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
+            l2e_from_page(
+                virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt) + i,
+                __PAGE_HYPERVISOR);
+    for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
+        pl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
+            (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ?
+            l2e_from_pfn(l3e_get_pfn(pl3e[i]), __PAGE_HYPERVISOR) :
+            l2e_empty();
     unmap_domain_page(pl2e);
 
     return 1;
 }
 
-static inline unsigned long fixup_pae_vaddr(unsigned long l2vaddr,
-                                            unsigned long l2type)
+static inline int l1_backptr(
+    unsigned long *backptr, unsigned long offset_in_l2, unsigned long l2_type)
 {
-    unsigned long l3vaddr;
-    
-    if ((l2type & PGT_va_mask) == PGT_va_unknown) {
-    printk("%s: hooking one l2 pt into multiple l3 slots isn't allowed, sorry\n",
-               __FUNCTION__);
-        domain_crash();
-    }
-    l3vaddr = ((l2type & PGT_va_mask) >> PGT_va_shift)
-        << L3_PAGETABLE_SHIFT;
-    return l3vaddr + l2vaddr;
+    unsigned long l2_backptr = l2_type & PGT_va_mask;
+    BUG_ON(l2_backptr == PGT_va_unknown);
+    if ( l2_backptr == PGT_va_mutable )
+        return 0;
+    *backptr = 
+        ((l2_backptr >> PGT_va_shift) << L3_PAGETABLE_SHIFT) | 
+        (offset_in_l2 << L2_PAGETABLE_SHIFT);
+    return 1;
 }
 
 #else
-# define fixup_pae_linear_mappings(unused) (1)
-# define fixup_pae_vaddr(vaddr, type) (vaddr)
+# define create_pae_xen_mappings(pl3e) (1)
+# define l1_backptr(bp,l2o,l2t) \
+    ({ *(bp) = (l2o) << L2_PAGETABLE_SHIFT; 1; })
 #endif
 
 static int alloc_l2_table(struct pfn_info *page, unsigned int type)
@@ -742,18 +767,18 @@
     l2_pgentry_t  *pl2e;
     int            i;
 
-    // See the code in shadow_promote() to understand why this is here...
+    /* See the code in shadow_promote() to understand why this is here. */
     if ( (PGT_base_page_table == PGT_l2_page_table) &&
          unlikely(shadow_mode_refcounts(d)) )
         return 1;
-    ASSERT( !shadow_mode_refcounts(d) );
-   
+    ASSERT(!shadow_mode_refcounts(d));
     
     pl2e = map_domain_page(pfn);
 
-    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) {
-        vaddr = i << L2_PAGETABLE_SHIFT;
-        vaddr = fixup_pae_vaddr(vaddr,type);
+    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+    {
+        if ( !l1_backptr(&vaddr, i, type) )
+            goto fail;
         if ( is_guest_l2_slot(type, i) &&
              unlikely(!get_page_from_l2e(pl2e[i], pfn, d, vaddr)) )
             goto fail;
@@ -771,24 +796,6 @@
             virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
             __PAGE_HYPERVISOR);
 #endif
-#if CONFIG_PAGING_LEVELS == 3
-    if (3 == ((type & PGT_va_mask) >> PGT_va_shift)) {
-        unsigned long v,src,dst;
-        void *virt;
-        /* Xen private mappings. */
-        dst = L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1);
-        src = L2_PAGETABLE_FIRST_XEN_SLOT;
-        memcpy(&pl2e[dst], &idle_pg_table_l2[src],
-               L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-        for (v = PERDOMAIN_VIRT_START; v < PERDOMAIN_VIRT_END;
-             v += (1 << L2_PAGETABLE_SHIFT)) {
-            dst = (v >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
-            virt = page_get_owner(page)->arch.mm_perdomain_pt + (v-PERDOMAIN_VIRT_START);
-            pl2e[dst] = l2e_from_page(virt_to_page(virt), __PAGE_HYPERVISOR);
-        }
-        /* see fixup_pae_linear_mappings() for linear pagetables */
-    }
-#endif
 
     unmap_domain_page(pl2e);
     return 1;
@@ -804,7 +811,6 @@
 
 
 #if CONFIG_PAGING_LEVELS >= 3
-
 static int alloc_l3_table(struct pfn_info *page)
 {
     struct domain *d = page_get_owner(page);
@@ -813,18 +819,20 @@
     l3_pgentry_t  *pl3e;
     int            i;
 
-    ASSERT( !shadow_mode_refcounts(d) );
+    ASSERT(!shadow_mode_refcounts(d));
 
     pl3e = map_domain_page(pfn);
-    for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ ) {
+    for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
+    {
         vaddr = i << L3_PAGETABLE_SHIFT;
         if ( is_guest_l3_slot(i) &&
              unlikely(!get_page_from_l3e(pl3e[i], pfn, d, vaddr)) )
             goto fail;
     }
 
-    if (!fixup_pae_linear_mappings(pl3e))
+    if ( !create_pae_xen_mappings(pl3e) )
         goto fail;
+
     unmap_domain_page(pl3e);
     return 1;
 
@@ -836,11 +844,11 @@
     unmap_domain_page(pl3e);
     return 0;
 }
-
+#else
+#define alloc_l3_table(page) (0)
 #endif
 
 #if CONFIG_PAGING_LEVELS >= 4
-
 static int alloc_l4_table(struct pfn_info *page)
 {
     struct domain *d = page_get_owner(page);
@@ -848,12 +856,11 @@
     l4_pgentry_t  *pl4e = page_to_virt(page);
     int            i;
 
-    // See the code in shadow_promote() to understand why this is here...
+    /* See the code in shadow_promote() to understand why this is here. */
     if ( (PGT_base_page_table == PGT_l4_page_table) &&
          shadow_mode_refcounts(d) )
         return 1;
-
-    ASSERT( !shadow_mode_refcounts(d) );
+    ASSERT(!shadow_mode_refcounts(d));
 
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l4_slot(i) &&
@@ -880,8 +887,9 @@
 
     return 0;
 }
-
-#endif /* __x86_64__ */
+#else
+#define alloc_l4_table(page) (0)
+#endif
 
 
 static void free_l1_table(struct pfn_info *page)
@@ -909,10 +917,9 @@
 
     pl2e = map_domain_page(pfn);
 
-    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) {
+    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l2_slot(page->u.inuse.type_info, i) )
             put_page_from_l2e(pl2e[i], pfn);
-    }
 
     unmap_domain_page(pl2e);
 }
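
Two details of create_pae_xen_mappings() above are worth unpacking. First,
the exclusivity test: the L2 reachable from L3 slot 3 receives Xen-private
mappings (including the linear pagetables, which are specific to this L3),
so exactly one reference -- ours -- may exist to it, and the type count is
what establishes that. A sketch of the check, assuming an illustrative
type_info bit layout rather than Xen's real one:

    #include <assert.h>

    #define PGT_count_mask  0x0000ffffUL  /* assumed: low 16 bits count type refs */
    #define PGT_pinned      (1UL << 27)   /* assumed flag position                */

    static int l2_usable_for_xen_mappings(unsigned long type_info)
    {
        assert(!(type_info & PGT_pinned));          /* slot-3 L2 is never pinned */
        assert((type_info & PGT_count_mask) != 0);  /* caller holds a reference  */
        return (type_info & PGT_count_mask) == 1;   /* ours must be the only one */
    }

    int main(void)
    {
        return !l2_usable_for_xen_mappings(1UL);    /* count==1, unpinned: usable */
    }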

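Second, the loop bounds: PERDOMAIN_MBYTES >> (L2_PAGETABLE_SHIFT - 20) is
megabytes divided by megabytes-per-L2-entry, since L2_PAGETABLE_SHIFT is a
byte shift and 2^20 bytes is 1MB. Under PAE each L2 entry maps 2MB, so a
quick sanity check reads as follows (the PERDOMAIN_MBYTES value here is
illustrative, not the real constant):

    #include <assert.h>

    #define L2_PAGETABLE_SHIFT  21  /* PAE: each L2 entry maps 2MB        */
    #define PERDOMAIN_MBYTES     4  /* assumed size, not Xen's real value */

    int main(void)
    {
        /* MB >> (byte_shift - MB_shift) == MB / (MB mapped per L2 entry) */
        int n = PERDOMAIN_MBYTES >> (L2_PAGETABLE_SHIFT - 20);
        assert(n == PERDOMAIN_MBYTES / 2);  /* 2 entries cover 4MB */
        return 0;
    }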