[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] Merge maf46@xxxxxxxxxxxxxxxxxxxxxxx:/usr/groups/xeno/BK/xen-unstable.bk



ChangeSet 1.1714, 2005/06/13 12:22:00+01:00, mafetter@xxxxxxxxxxxxxxxx

        Merge maf46@xxxxxxxxxxxxxxxxxxxxxxx:/usr/groups/xeno/BK/xen-unstable.bk
        into fleming.research:/scratch/fleming/mafetter/xen-unstable.bk



 mm.c |  315 ++++++++++++++++++++++++++++++++++---------------------------------
 1 files changed, 163 insertions(+), 152 deletions(-)


diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-06-13 19:02:57 -04:00
+++ b/xen/arch/x86/mm.c 2005-06-13 19:02:57 -04:00
@@ -94,12 +94,12 @@
 #include <xen/perfc.h>
 #include <xen/irq.h>
 #include <xen/softirq.h>
+#include <xen/domain_page.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
 #include <asm/flushtlb.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
-#include <asm/domain_page.h>
 #include <asm/ldt.h>
 #include <asm/x86_emulate.h>
 
@@ -145,31 +145,28 @@
 
 /* Frame table and its size in pages. */
 struct pfn_info *frame_table;
-unsigned long frame_table_size;
 unsigned long max_page;
 
 void __init init_frametable(void)
 {
-    unsigned long i, p, step;
+    unsigned long nr_pages, page_step, i, pfn;
 
-    frame_table      = (struct pfn_info *)FRAMETABLE_VIRT_START;
-    frame_table_size = max_page * sizeof(struct pfn_info);
-    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
+    frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
 
-    step = (1 << L2_PAGETABLE_SHIFT);
-    for ( i = 0; i < frame_table_size; i += step )
+    nr_pages  = PFN_UP(max_page * sizeof(*frame_table));
+    page_step = (1 << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
+
+    for ( i = 0; i < nr_pages; i += page_step )
     {
-        p = alloc_boot_pages(min(frame_table_size - i, step), step);
-        if ( p == 0 )
+        pfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
+        if ( pfn == 0 )
             panic("Not enough memory for frame table\n");
         map_pages_to_xen(
-            FRAMETABLE_VIRT_START + i,
-            p >> PAGE_SHIFT,
-            step >> PAGE_SHIFT,
-            PAGE_HYPERVISOR);
+            FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
+            pfn, page_step, PAGE_HYPERVISOR);
     }
 
-    memset(frame_table, 0, frame_table_size);
+    memset(frame_table, 0, nr_pages << PAGE_SHIFT);
 }
 
 void arch_init_memory(void)
@@ -269,17 +266,17 @@
     struct desc_struct *descs;
     int i;
 
-    descs = map_domain_mem((page-frame_table) << PAGE_SHIFT);
+    descs = map_domain_page(page_to_pfn(page));
 
     for ( i = 0; i < 512; i++ )
         if ( unlikely(!check_descriptor(&descs[i])) )
             goto fail;
 
-    unmap_domain_mem(descs);
+    unmap_domain_page(descs);
     return 1;
 
  fail:
-    unmap_domain_mem(descs);
+    unmap_domain_page(descs);
     return 0;
 }
 
@@ -665,14 +662,14 @@
 
     ASSERT(!shadow_mode_refcounts(d));
 
-    pl1e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl1e = map_domain_page(pfn);
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l1_slot(i) &&
              unlikely(!get_page_from_l1e(pl1e[i], d)) )
             goto fail;
 
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
     return 1;
 
  fail:
@@ -680,58 +677,83 @@
         if ( is_guest_l1_slot(i) )
             put_page_from_l1e(pl1e[i], d);
 
-    unmap_domain_mem(pl1e);
+    unmap_domain_page(pl1e);
     return 0;
 }
 
 #ifdef CONFIG_X86_PAE
-static inline int fixup_pae_linear_mappings(l3_pgentry_t *pl3e)
+static int create_pae_xen_mappings(l3_pgentry_t *pl3e)
 {
-    l2_pgentry_t *pl2e;
-    unsigned long vaddr;
-    int i,idx;
+    struct pfn_info *page;
+    l2_pgentry_t    *pl2e;
+    l3_pgentry_t     l3e3;
+    int              i;
 
-    while ((unsigned long)pl3e & ~PAGE_MASK)
-        pl3e--;
+    pl3e = (l3_pgentry_t *)((unsigned long)pl3e & PAGE_MASK);
 
-    if (!(l3e_get_flags(pl3e[3]) & _PAGE_PRESENT)) {
-        printk("Installing a L3 PAE pt without L2 in slot #3 isn't going to fly ...\n");
+    /* 3rd L3 slot contains L2 with Xen-private mappings. It *must* exist. */
+    l3e3 = pl3e[3];
+    if ( !(l3e_get_flags(l3e3) & _PAGE_PRESENT) )
+    {
+        MEM_LOG("PAE L3 3rd slot is empty");
         return 0;
     }
 
-    pl2e = map_domain_mem(l3e_get_paddr(pl3e[3]));
-    for (i = 0; i < 4; i++) {
-        vaddr = LINEAR_PT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
-        idx = (vaddr >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
-        if (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) {
-            pl2e[idx] = l2e_from_paddr(l3e_get_paddr(pl3e[i]),
-                                       __PAGE_HYPERVISOR);
-        } else
-            pl2e[idx] = l2e_empty();
+    /*
+     * The Xen-private mappings include linear mappings. The L2 thus cannot
+     * be shared by multiple L3 tables. The test here is adequate because:
+     *  1. Cannot appear in slots != 3 because the page would then have an
+     *     unknown va backpointer, which get_page_type() explicitly disallows.
+     *  2. Cannot appear in another page table's L3:
+     *     a. alloc_l3_table() calls this function and this check will fail
+     *     b. mod_l3_entry() disallows updates to slot 3 in an existing table
+     */
+    page = l3e_get_page(l3e3);
+    BUG_ON(page->u.inuse.type_info & PGT_pinned);
+    BUG_ON((page->u.inuse.type_info & PGT_count_mask) == 0);
+    if ( (page->u.inuse.type_info & PGT_count_mask) != 1 )
+    {
+        MEM_LOG("PAE L3 3rd slot is shared");
+        return 0;
     }
-    unmap_domain_mem(pl2e);
+
+    /* Xen private mappings. */
+    pl2e = map_domain_page(l3e_get_pfn(l3e3));
+    memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
+           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
+           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
+    for ( i = 0; i < (PERDOMAIN_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
+        pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
+            l2e_from_page(
+                virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt) + i,
+                __PAGE_HYPERVISOR);
+    for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
+        pl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
+            (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ?
+            l2e_from_pfn(l3e_get_pfn(pl3e[i]), __PAGE_HYPERVISOR) :
+            l2e_empty();
+    unmap_domain_page(pl2e);
 
     return 1;
 }
 
-static inline unsigned long fixup_pae_vaddr(unsigned long l2vaddr,
-                                            unsigned long l2type)
+static inline int l1_backptr(
+    unsigned long *backptr, unsigned long offset_in_l2, unsigned long l2_type)
 {
-    unsigned long l3vaddr;
-    
-    if ((l2type & PGT_va_mask) == PGT_va_unknown) {
-        printk("%s: hooking one l2 pt into multiple l3 slots isn't allowed, sorry\n",
-               __FUNCTION__);
-        domain_crash();
-    }
-    l3vaddr = ((l2type & PGT_va_mask) >> PGT_va_shift)
-        << L3_PAGETABLE_SHIFT;
-    return l3vaddr + l2vaddr;
+    unsigned long l2_backptr = l2_type & PGT_va_mask;
+    BUG_ON(l2_backptr == PGT_va_unknown);
+    if ( l2_backptr == PGT_va_mutable )
+        return 0;
+    *backptr = 
+        ((l2_backptr >> PGT_va_shift) << L3_PAGETABLE_SHIFT) | 
+        (offset_in_l2 << L2_PAGETABLE_SHIFT);
+    return 1;
 }
 
 #else
-# define fixup_pae_linear_mappings(unused) (1)
-# define fixup_pae_vaddr(vaddr, type) (vaddr)
+# define create_pae_xen_mappings(pl3e) (1)
+# define l1_backptr(bp,l2o,l2t) \
+    ({ *(bp) = (l2o) << L2_PAGETABLE_SHIFT; 1; })
 #endif
 
 static int alloc_l2_table(struct pfn_info *page, unsigned int type)
@@ -742,18 +764,18 @@
     l2_pgentry_t  *pl2e;
     int            i;
 
-    // See the code in shadow_promote() to understand why this is here...
+    /* See the code in shadow_promote() to understand why this is here. */
     if ( (PGT_base_page_table == PGT_l2_page_table) &&
          unlikely(shadow_mode_refcounts(d)) )
         return 1;
-    ASSERT( !shadow_mode_refcounts(d) );
-   
+    ASSERT(!shadow_mode_refcounts(d));
     
-    pl2e = map_domain_mem(pfn << PAGE_SHIFT);
+    pl2e = map_domain_page(pfn);
 
-    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ ) {
-        vaddr = i << L2_PAGETABLE_SHIFT;
-        vaddr = fixup_pae_vaddr(vaddr,type);
+    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
+    {
+        if ( !l1_backptr(&vaddr, i, type) )
+            goto fail;
         if ( is_guest_l2_slot(type, i) &&
              unlikely(!get_page_from_l2e(pl2e[i], pfn, d, vaddr)) )
             goto fail;
@@ -771,26 +793,8 @@
             virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
             __PAGE_HYPERVISOR);
 #endif
-#if CONFIG_PAGING_LEVELS == 3
-    if (3 == ((type & PGT_va_mask) >> PGT_va_shift)) {
-        unsigned long v,src,dst;
-        void *virt;
-        /* Xen private mappings. */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.