
[Xen-changelog] [xen-unstable] x86: We can assume CONFIG_PAGING_LEVELS==4.


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Fri, 14 Sep 2012 10:55:32 +0000
  • Delivery-date: Fri, 14 Sep 2012 10:55:41 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1347454766 -3600
# Node ID 7b658d31b5e1ebaaf9ea9cdc31cdba25d34d68ea
# Parent  bc8cb47787025aaa987a5a01719d014d8ede8665
x86: We can assume CONFIG_PAGING_LEVELS==4.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/boot/trampoline.S
--- a/xen/arch/x86/boot/trampoline.S    Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/boot/trampoline.S    Wed Sep 12 13:59:26 2012 +0100
@@ -94,10 +94,8 @@ trampoline_protmode_entry:
         jz      .Lskip_efer
         movl    $MSR_EFER,%ecx
         rdmsr
-#if CONFIG_PAGING_LEVELS == 4
         btsl    $_EFER_LME,%eax /* Long Mode      */
         btsl    $_EFER_SCE,%eax /* SYSCALL/SYSRET */
-#endif
         btl     $20,%edi        /* No Execute?    */
         jnc     1f
         btsl    $_EFER_NX,%eax  /* No Execute     */
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/boot/wakeup.S
--- a/xen/arch/x86/boot/wakeup.S        Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/boot/wakeup.S        Wed Sep 12 13:59:26 2012 +0100
@@ -143,10 +143,8 @@ wakeup_32:
         jz      .Lskip_eferw
         movl    $MSR_EFER,%ecx
         rdmsr
-#if CONFIG_PAGING_LEVELS == 4
         btsl    $_EFER_LME,%eax /* Long Mode      */
         btsl    $_EFER_SCE,%eax /* SYSCALL/SYSRET */
-#endif
         btl     $20,%edi        /* No Execute?    */
         jnc     1f
         btsl    $_EFER_NX,%eax  /* No Execute     */
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/domain.c     Wed Sep 12 13:59:26 2012 +0100
@@ -588,9 +588,8 @@ int arch_domain_create(struct domain *d,
     }
     else
     {
-        /* 32-bit PV guest by default only if Xen is not 64-bit. */
-        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
-            (CONFIG_PAGING_LEVELS != 4);
+        /* 64-bit PV guest by default. */
+        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
 
         spin_lock_init(&d->arch.pv_domain.e820_lock);
     }
@@ -2096,7 +2095,6 @@ int domain_relinquish_resources(struct d
         ret = relinquish_memory(d, &d->xenpage_list, ~0UL);
         if ( ret )
             return ret;
-#if CONFIG_PAGING_LEVELS >= 4
         d->arch.relmem = RELMEM_l4;
         /* fallthrough */
 
@@ -2104,8 +2102,6 @@ int domain_relinquish_resources(struct d
         ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table);
         if ( ret )
             return ret;
-#endif
-#if CONFIG_PAGING_LEVELS >= 3
         d->arch.relmem = RELMEM_l3;
         /* fallthrough */
 
@@ -2113,7 +2109,6 @@ int domain_relinquish_resources(struct d
         ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table);
         if ( ret )
             return ret;
-#endif
         d->arch.relmem = RELMEM_l2;
         /* fallthrough */
 
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/domain_build.c       Wed Sep 12 13:59:26 2012 +0100
@@ -331,12 +331,7 @@ int __init construct_dom0(
     unsigned long image_len = image->mod_end;
     char *image_start = image_base + image_headroom;
     unsigned long initrd_len = initrd ? initrd->mod_end : 0;
-#if CONFIG_PAGING_LEVELS < 4
-    module_t mpt;
-    void *mpt_ptr;
-#else
     l4_pgentry_t *l4tab = NULL, *l4start = NULL;
-#endif
     l3_pgentry_t *l3tab = NULL, *l3start = NULL;
     l2_pgentry_t *l2tab = NULL, *l2start = NULL;
     l1_pgentry_t *l1tab = NULL, *l1start = NULL;
@@ -391,27 +386,16 @@ int __init construct_dom0(
     compatible = 0;
     compat32   = 0;
     machine = elf_uval(&elf, elf.ehdr, e_machine);
-    switch (CONFIG_PAGING_LEVELS) {
-    case 3: /* x86_32p */
-        if (parms.pae == PAEKERN_bimodal)
-            parms.pae = PAEKERN_extended_cr3;
-        printk(" Xen  kernel: 32-bit, PAE, lsb\n");
-        if (elf_32bit(&elf) && parms.pae && machine == EM_386)
-            compatible = 1;
-        break;
-    case 4: /* x86_64 */
-        printk(" Xen  kernel: 64-bit, lsb, compat32\n");
-        if (elf_32bit(&elf) && parms.pae == PAEKERN_bimodal)
-            parms.pae = PAEKERN_extended_cr3;
-        if (elf_32bit(&elf) && parms.pae && machine == EM_386)
-        {
-            compat32 = 1;
-            compatible = 1;
-        }
-        if (elf_64bit(&elf) && machine == EM_X86_64)
-            compatible = 1;
-        break;
+    printk(" Xen  kernel: 64-bit, lsb, compat32\n");
+    if (elf_32bit(&elf) && parms.pae == PAEKERN_bimodal)
+        parms.pae = PAEKERN_extended_cr3;
+    if (elf_32bit(&elf) && parms.pae && machine == EM_386)
+    {
+        compat32 = 1;
+        compatible = 1;
     }
+    if (elf_64bit(&elf) && machine == EM_X86_64)
+        compatible = 1;
     printk(" Dom0 kernel: %s%s, %s, paddr 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
            elf_64bit(&elf) ? "64-bit" : "32-bit",
            parms.pae       ? ", PAE"  : "",
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Sep 12 13:59:26 2012 +0100
@@ -1285,8 +1285,7 @@ struct hvm_function_table * __init start
 
     svm_function_table.hap_supported = cpu_has_svm_npt;
     svm_function_table.hap_capabilities = HVM_HAP_SUPERPAGE_2MB |
-        (((CONFIG_PAGING_LEVELS == 4) && (cpuid_edx(0x80000001) & 0x04000000)) ?
-            HVM_HAP_SUPERPAGE_1GB : 0);
+        ((cpuid_edx(0x80000001) & 0x04000000) ? HVM_HAP_SUPERPAGE_1GB : 0);
 
     return &svm_function_table;
 }
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm.c Wed Sep 12 13:59:26 2012 +0100
@@ -277,8 +277,7 @@ void __init arch_init_memory(void)
     if ( cpu_has_nx )
         base_disallow_mask &= ~_PAGE_NX_BIT;
     /* On x86/64, range [62:52] is available for guest software use. */
-    if ( CONFIG_PAGING_LEVELS == 4 )
-        base_disallow_mask &= ~get_pte_flags((intpte_t)0x7ff << 52);
+    base_disallow_mask &= ~get_pte_flags((intpte_t)0x7ff << 52);
 
     /*
      * Initialise our DOMID_XEN domain.
@@ -475,11 +474,9 @@ void update_cr3(struct vcpu *v)
         return;
     }
 
-#if CONFIG_PAGING_LEVELS == 4
     if ( !(v->arch.flags & TF_kernel_mode) )
         cr3_mfn = pagetable_get_pfn(v->arch.guest_table_user);
     else
-#endif
         cr3_mfn = pagetable_get_pfn(v->arch.guest_table);
 
     make_cr3(v, cr3_mfn);
@@ -925,7 +922,6 @@ get_page_from_l3e(
     return rc;
 }
 
-#if CONFIG_PAGING_LEVELS >= 4
 define_get_linear_pagetable(l4);
 static int
 get_page_from_l4e(
@@ -949,7 +945,6 @@ get_page_from_l4e(
 
     return rc;
 }
-#endif /* 4 level */
 
 #ifdef USER_MAPPINGS_ARE_GLOBAL
 #define adjust_guest_l1e(pl1e, d)                                            \
@@ -1107,7 +1102,6 @@ static int put_page_from_l3e(l3_pgentry_
     return put_page_and_type_preemptible(l3e_get_page(l3e), preemptible);
 }
 
-#if CONFIG_PAGING_LEVELS >= 4
 static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn,
                              int partial, int preemptible)
 {
@@ -1120,7 +1114,6 @@ static int put_page_from_l4e(l4_pgentry_
     }
     return 1;
 }
-#endif
 
 static int alloc_l1_table(struct page_info *page)
 {
@@ -1259,21 +1252,6 @@ static int alloc_l3_table(struct page_in
     unsigned int   i;
     int            rc = 0, partial = page->partial_pte;
 
-#if CONFIG_PAGING_LEVELS == 3
-    /*
-     * PAE pgdirs above 4GB are unacceptable if the guest does not understand
-     * the weird 'extended cr3' format for dealing with high-order address
-     * bits. We cut some slack for control tools (before vcpu0 is initialised).
-     */
-    if ( (pfn >= 0x100000) &&
-         unlikely(!VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3)) &&
-         d->vcpu && d->vcpu[0] && d->vcpu[0]->is_initialised )
-    {
-        MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
-        return -EINVAL;
-    }
-#endif
-
     pl3e = map_domain_page(pfn);
 
     /*
@@ -1340,7 +1318,6 @@ static int alloc_l3_table(struct page_in
     return rc > 0 ? 0 : rc;
 }
 
-#if CONFIG_PAGING_LEVELS >= 4
 static int alloc_l4_table(struct page_info *page, int preemptible)
 {
     struct domain *d = page_get_owner(page);
@@ -1396,10 +1373,6 @@ static int alloc_l4_table(struct page_in
 
     return rc > 0 ? 0 : rc;
 }
-#else
-#define alloc_l4_table(page, preemptible) (-EINVAL)
-#endif
-
 
 static void free_l1_table(struct page_info *page)
 {
@@ -1486,7 +1459,6 @@ static int free_l3_table(struct page_inf
     return rc > 0 ? 0 : rc;
 }
 
-#if CONFIG_PAGING_LEVELS >= 4
 static int free_l4_table(struct page_info *page, int preemptible)
 {
     struct domain *d = page_get_owner(page);
@@ -1516,9 +1488,6 @@ static int free_l4_table(struct page_inf
     }
     return rc > 0 ? 0 : rc;
 }
-#else
-#define free_l4_table(page, preemptible) (-EINVAL)
-#endif
 
 int page_lock(struct page_info *page)
 {
@@ -1823,8 +1792,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
     return rc;
 }
 
-#if CONFIG_PAGING_LEVELS >= 4
-
 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
 static int mod_l4_entry(l4_pgentry_t *pl4e, 
                         l4_pgentry_t nl4e, 
@@ -1886,8 +1853,6 @@ static int mod_l4_entry(l4_pgentry_t *pl
     return rc;
 }
 
-#endif
-
 static int cleanup_page_cacheattr(struct page_info *page)
 {
     uint32_t cacheattr =
@@ -2089,10 +2054,6 @@ int free_page_type(struct page_info *pag
         rc = free_l2_table(page, preemptible);
         break;
     case PGT_l3_page_table:
-#if CONFIG_PAGING_LEVELS == 3
-        if ( !(type & PGT_partial) )
-            page->nr_validated_ptes = L3_PAGETABLE_ENTRIES;
-#endif
         rc = free_l3_table(page, preemptible);
         break;
     case PGT_l4_page_table:
@@ -3348,12 +3309,10 @@ long do_mmu_update(
                     rc = mod_l3_entry(va, l3e_from_intpte(req.val), mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
                     break;
-#if CONFIG_PAGING_LEVELS >= 4
                 case PGT_l4_page_table:
                     rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
                                       cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
                 break;
-#endif
                 case PGT_writable_page:
                     perfc_incr(writable_mmu_updates);
                     if ( paging_write_guest_entry(v, va, req.val, _mfn(mfn)) )
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Wed Sep 12 13:59:26 2012 +0100
@@ -280,26 +280,6 @@ static struct page_info *hap_alloc_p2m_p
     paging_lock_recursive(d);
     pg = hap_alloc(d);
 
-#if CONFIG_PAGING_LEVELS == 3
-    /* Under PAE mode, top-level P2M table should be allocated below 4GB space
-     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to
-     * force this requirement, and exchange the guaranteed 32-bit-clean
-     * page for the one we just hap_alloc()ed. */
-    if ( d->arch.paging.hap.p2m_pages == 0
-         && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
-    {
-        free_domheap_page(pg);
-        pg = alloc_domheap_page(
-            NULL, MEMF_bits(32) | MEMF_node(domain_to_node(d)));
-        if ( likely(pg != NULL) )
-        {
-            void *p = __map_domain_page(pg);
-            clear_page(p);
-            hap_unmap_domain_page(p);
-        }
-    }
-#endif
-
     if ( likely(pg != NULL) )
     {
         d->arch.paging.hap.total_pages--;
@@ -403,7 +383,6 @@ hap_set_allocation(struct domain *d, uns
     return 0;
 }
 
-#if CONFIG_PAGING_LEVELS == 4
 static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
 {
     struct domain *d = v->domain;
@@ -433,103 +412,20 @@ static void hap_install_xen_entries_in_l
 
     hap_unmap_domain_page(l4e);
 }
-#endif /* CONFIG_PAGING_LEVELS == 4 */
-
-#if CONFIG_PAGING_LEVELS == 3
-static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
-{
-    struct domain *d = v->domain;
-    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
-    l2_pgentry_t *l2e;
-    l3_pgentry_t *p2m;
-    int i;
-
-    l2e = hap_map_domain_page(l2hmfn);
-    ASSERT(l2e != NULL);
-
-    /* Copy the common Xen mappings from the idle domain */
-    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
-           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
-           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-
-    /* Install the per-domain mappings for this domain */
-    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
-        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
-            l2e_from_pfn(
-                mfn_x(page_to_mfn(perdomain_pt_page(d, i))),
-                __PAGE_HYPERVISOR);
-
-    /* No linear mapping; will be set up by monitor-table contructor. */
-    for ( i = 0; i < 4; i++ )
-        l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
-            l2e_empty();
-
-    /* Install the domain-specific p2m table */
-    ASSERT(pagetable_get_pfn(p2m_get_pagetable(hostp2m)) != 0);
-    p2m = hap_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(hostp2m)));
-    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
-    {
-        l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
-            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
-            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
-                           __PAGE_HYPERVISOR)
-            : l2e_empty();
-    }
-    hap_unmap_domain_page(p2m);
-    hap_unmap_domain_page(l2e);
-}
-#endif
 
 static mfn_t hap_make_monitor_table(struct vcpu *v)
 {
     struct domain *d = v->domain;
     struct page_info *pg;
+    mfn_t m4mfn;
 
     ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
 
-#if CONFIG_PAGING_LEVELS == 4
-    {
-        mfn_t m4mfn;
-        if ( (pg = hap_alloc(d)) == NULL )
-            goto oom;
-        m4mfn = page_to_mfn(pg);
-        hap_install_xen_entries_in_l4(v, m4mfn);
-        return m4mfn;
-    }
-#elif CONFIG_PAGING_LEVELS == 3
-    {
-        mfn_t m3mfn, m2mfn;
-        l3_pgentry_t *l3e;
-        l2_pgentry_t *l2e;
-        int i;
-
-        if ( (pg = hap_alloc(d)) == NULL )
-            goto oom;
-        m3mfn = page_to_mfn(pg);
-
-        /* Install a monitor l2 table in slot 3 of the l3 table.
-         * This is used for all Xen entries, including linear maps
-         */
-        if ( (pg = hap_alloc(d)) == NULL )
-            goto oom;
-        m2mfn = page_to_mfn(pg);
-        l3e = hap_map_domain_page(m3mfn);
-        l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
-        hap_install_xen_entries_in_l2h(v, m2mfn);
-        /* Install the monitor's own linear map */
-        l2e = hap_map_domain_page(m2mfn);
-        for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
-            l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
-                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
-                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
-                : l2e_empty();
-        hap_unmap_domain_page(l2e);
-        hap_unmap_domain_page(l3e);
-
-        HAP_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
-        return m3mfn;
-    }
-#endif
+    if ( (pg = hap_alloc(d)) == NULL )
+        goto oom;
+    m4mfn = page_to_mfn(pg);
+    hap_install_xen_entries_in_l4(v, m4mfn);
+    return m4mfn;
 
  oom:
     HAP_ERROR("out of memory building monitor pagetable\n");
@@ -541,16 +437,6 @@ static void hap_destroy_monitor_table(st
 {
     struct domain *d = v->domain;
 
-#if CONFIG_PAGING_LEVELS == 3
-    /* Need to destroy the l2 monitor page in slot 4 too */
-    {
-        l3_pgentry_t *l3e = hap_map_domain_page(mmfn);
-        ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
-        hap_free(d, _mfn(l3e_get_pfn(l3e[3])));
-        hap_unmap_domain_page(l3e);
-    }
-#endif
-
     /* Put the memory back in the pool */
     hap_free(d, mmfn);
 }
@@ -814,47 +700,6 @@ static void hap_update_paging_modes(stru
     put_gfn(d, cr3_gfn);
 }
 
-#if CONFIG_PAGING_LEVELS == 3
-static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
-/* Special case, only used for PAE hosts: update the mapping of the p2m
- * table.  This is trivial in other paging modes (one top-level entry
- * points to the top-level p2m, no maintenance needed), but PAE makes
- * life difficult by needing a copy of the p2m table in eight l2h slots
- * in the monitor table.  This function makes fresh copies when a p2m
- * l3e changes. */
-{
-    l2_pgentry_t *ml2e;
-    struct vcpu *v;
-    unsigned int index;
-
-    index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
-    ASSERT(index < MACHPHYS_MBYTES>>1);
-
-    for_each_vcpu ( d, v )
-    {
-        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
-            continue;
-
-        ASSERT(paging_mode_external(v->domain));
-
-        if ( v == current ) /* OK to use linear map of monitor_table */
-            ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
-        else {
-            l3_pgentry_t *ml3e;
-            ml3e = hap_map_domain_page(
-                pagetable_get_mfn(v->arch.monitor_table));
-            ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
-            ml2e = hap_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
-            ml2e += l2_table_offset(RO_MPT_VIRT_START);
-            hap_unmap_domain_page(ml3e);
-        }
-        ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
-        if ( v != current )
-            hap_unmap_domain_page(ml2e);
-    }
-}
-#endif
-
 static void
 hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
                     mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
@@ -886,14 +731,6 @@ hap_write_p2m_entry(struct vcpu *v, unsi
          && (level == 1 || (level == 2 && (old_flags & _PAGE_PSE))) )
              flush_tlb_mask(d->domain_dirty_cpumask);
 
-#if CONFIG_PAGING_LEVELS == 3
-    /* install P2M in monitor table for PAE Xen */
-    if ( level == 3 )
-        /* We have written to the p2m l3: need to sync the per-vcpu
-         * copies of it in the monitor tables */
-        p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p);
-#endif
-
     paging_unlock(d);
 
     if ( flush_nestedp2m )
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm/p2m-pod.c Wed Sep 12 13:59:26 2012 +0100
@@ -1120,10 +1120,6 @@ guest_physmap_mark_populate_on_demand(st
     if ( !paging_mode_translate(d) )
         return -EINVAL;
 
-    rc = p2m_gfn_check_limit(d, gfn, order);
-    if ( rc != 0 )
-        return rc;
-
     gfn_lock(p2m, gfn, order);
 
     P2M_DEBUG("mark pod gfn=%#lx\n", gfn);
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm/p2m-pt.c  Wed Sep 12 13:59:26 2012 +0100
@@ -150,10 +150,8 @@ p2m_free_entry(struct p2m_domain *p2m, l
 static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
                                 unsigned int nlevel, unsigned int flags)
 {
-#if CONFIG_PAGING_LEVELS == 4
     if ( iommu_hap_pt_share )
         l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
-#endif
 }
 
 static int
@@ -189,10 +187,6 @@ p2m_next_level(struct p2m_domain *p2m, m
             p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 4);
             break;
         case PGT_l2_page_table:
-#if CONFIG_PAGING_LEVELS == 3
-            /* for PAE mode, PDPE only has PCD/PWT/P bits available */
-            new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)), _PAGE_PRESENT);
-#endif
             p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
             p2m->write_p2m_entry(p2m, gfn, p2m_entry, *table_mfn, new_entry, 3);
             break;
@@ -317,12 +311,11 @@ p2m_set_entry(struct p2m_domain *p2m, un
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
-#if CONFIG_PAGING_LEVELS >= 4
     if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
                          L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                          L4_PAGETABLE_ENTRIES, PGT_l3_page_table) )
         goto out;
-#endif
+
     /*
      * Try to allocate 1GB page table if this feature is supported.
      */
@@ -361,18 +354,9 @@ p2m_set_entry(struct p2m_domain *p2m, un
         if ( l1e_get_flags(old_entry) & _PAGE_PRESENT )
             p2m_free_entry(p2m, &old_entry, page_order);
     }
-    /*
-     * When using PAE Xen, we only allow 33 bits of pseudo-physical
-     * address in translated guests (i.e. 8 GBytes).  This restriction
-     * comes from wanting to map the P2M table into the 16MB RO_MPT hole
-     * in Xen's address space for translated PV guests.
-     * When using AMD's NPT on PAE Xen, we are restricted to 4GB.
-     */
     else if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
                               L3_PAGETABLE_SHIFT - PAGE_SHIFT,
-                              ((CONFIG_PAGING_LEVELS == 3)
-                               ? (hap_enabled(p2m->domain) ? 4 : 8)
-                               : L3_PAGETABLE_ENTRIES),
+                              L3_PAGETABLE_ENTRIES,
                               PGT_l2_page_table) )
         goto out;
 
@@ -493,15 +477,12 @@ static mfn_t p2m_gfn_to_mfn_current(stru
 
     l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
     l2_pgentry_t l2e = l2e_empty();
+    l3_pgentry_t l3e = l3e_empty();
     int ret;
-#if CONFIG_PAGING_LEVELS >= 4
-    l3_pgentry_t l3e = l3e_empty();
-#endif
 
     ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) 
            / sizeof(l1_pgentry_t));
 
-#if CONFIG_PAGING_LEVELS >= 4
     /*
      * Read & process L3
      */
@@ -549,7 +530,7 @@ pod_retry_l3:
             *page_order = PAGE_ORDER_1G;
         goto out;
     }
-#endif
+
     /*
      * Read & process L2
      */
@@ -691,7 +672,6 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
 
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 
-#if CONFIG_PAGING_LEVELS >= 4
     {
         l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
         l4e += l4_table_offset(addr);
@@ -703,19 +683,9 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
         mfn = _mfn(l4e_get_pfn(*l4e));
         unmap_domain_page(l4e);
     }
-#endif
     {
         l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
-#if CONFIG_PAGING_LEVELS == 3
-        /* On PAE hosts the p2m has eight l3 entries, not four (see
-         * shadow_set_p2m_entry()) so we can't use l3_table_offset.
-         * Instead, just count the number of l3es from zero.  It's safe
-         * to do this because we already checked that the gfn is within
-         * the bounds of the p2m. */
-        l3e += (addr >> L3_PAGETABLE_SHIFT);
-#else
         l3e += l3_table_offset(addr);
-#endif
 pod_retry_l3:
         if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
         {
@@ -828,10 +798,8 @@ static void p2m_change_type_global(struc
     mfn_t l1mfn, l2mfn, l3mfn;
     unsigned long i1, i2, i3;
     l3_pgentry_t *l3e;
-#if CONFIG_PAGING_LEVELS == 4
     l4_pgentry_t *l4e;
     unsigned long i4;
-#endif /* CONFIG_PAGING_LEVELS == 4 */
 
     BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
     BUG_ON(ot != nt && (ot == p2m_mmio_direct || nt == p2m_mmio_direct));
@@ -844,14 +812,8 @@ static void p2m_change_type_global(struc
 
     ASSERT(p2m_locked_by_me(p2m));
 
-#if CONFIG_PAGING_LEVELS == 4
     l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#else /* CONFIG_PAGING_LEVELS == 3 */
-    l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-    l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#endif
 
-#if CONFIG_PAGING_LEVELS >= 4
     for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
     {
         if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
@@ -860,9 +822,8 @@ static void p2m_change_type_global(struc
         }
         l3mfn = _mfn(l4e_get_pfn(l4e[i4]));
         l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
-#endif
         for ( i3 = 0;
-              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
+              i3 < L3_PAGETABLE_ENTRIES;
               i3++ )
         {
             if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
@@ -901,11 +862,7 @@ static void p2m_change_type_global(struc
                     mfn = l2e_get_pfn(l2e[i2]);
                     /* Do not use get_gpfn_from_mfn because it may return 
                        SHARED_M2P_ENTRY */
-                    gfn = (i2 + (i3
-#if CONFIG_PAGING_LEVELS >= 4
-                                  + (i4 * L3_PAGETABLE_ENTRIES)
-#endif
-                               )
+                    gfn = (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
                            * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES; 
                     flags = p2m_type_to_flags(nt, _mfn(mfn));
                     l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
@@ -924,12 +881,8 @@ static void p2m_change_type_global(struc
                     if ( p2m_flags_to_type(flags) != ot )
                         continue;
                     mfn = l1e_get_pfn(l1e[i1]);
-                    gfn = i1 + (i2 + (i3
-#if CONFIG_PAGING_LEVELS >= 4
-                                       + (i4 * L3_PAGETABLE_ENTRIES)
-#endif
-                                    )
-                           * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES; 
+                    gfn = i1 + (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
+                                * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES; 
                     /* create a new 1le entry with the new type */
                     flags = p2m_type_to_flags(nt, _mfn(mfn));
                     l1e_content = p2m_l1e_from_pfn(mfn, flags);
@@ -940,17 +893,10 @@ static void p2m_change_type_global(struc
             }
             unmap_domain_page(l2e);
         }
-#if CONFIG_PAGING_LEVELS >= 4
         unmap_domain_page(l3e);
     }
-#endif
 
-#if CONFIG_PAGING_LEVELS == 4
     unmap_domain_page(l4e);
-#else /* CONFIG_PAGING_LEVELS == 3 */
-    unmap_domain_page(l3e);
-#endif
-
 }
 
 #if P2M_AUDIT
@@ -976,19 +922,12 @@ long p2m_pt_audit_p2m(struct p2m_domain 
         l1_pgentry_t *l1e;
         int i1, i2;
 
-#if CONFIG_PAGING_LEVELS == 4
         l4_pgentry_t *l4e;
         l3_pgentry_t *l3e;
         int i4, i3;
         l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#else /* CONFIG_PAGING_LEVELS == 3 */
-        l3_pgentry_t *l3e;
-        int i3;
-        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
-#endif
 
         gfn = 0;
-#if CONFIG_PAGING_LEVELS >= 4
         for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
         {
             if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
@@ -997,9 +936,8 @@ long p2m_pt_audit_p2m(struct p2m_domain 
                 continue;
             }
             l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
-#endif
             for ( i3 = 0;
-                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
+                  i3 < L3_PAGETABLE_ENTRIES;
                   i3++ )
             {
                 if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
@@ -1101,17 +1039,10 @@ long p2m_pt_audit_p2m(struct p2m_domain 
                 }
                 unmap_domain_page(l2e);
             }
-#if CONFIG_PAGING_LEVELS >= 4
             unmap_domain_page(l3e);
         }
-#endif
 
-#if CONFIG_PAGING_LEVELS == 4
         unmap_domain_page(l4e);
-#else /* CONFIG_PAGING_LEVELS == 3 */
-        unmap_domain_page(l3e);
-#endif
-
     }
 
     if ( entry_count != p2m->pod.entry_count )
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm/p2m.c     Wed Sep 12 13:59:26 2012 +0100
@@ -343,14 +343,7 @@ int p2m_alloc_table(struct p2m_domain *p
 
     P2M_PRINTK("allocating p2m table\n");
 
-    p2m_top = p2m_alloc_ptp(p2m,
-#if CONFIG_PAGING_LEVELS == 4
-        PGT_l4_page_table
-#else
-        PGT_l3_page_table
-#endif
-        );
-
+    p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table);
     if ( p2m_top == NULL )
     {
         p2m_unlock(p2m);
@@ -545,10 +538,6 @@ guest_physmap_add_entry(struct domain *d
         return 0;
     }
 
-    rc = p2m_gfn_check_limit(d, gfn, page_order);
-    if ( rc != 0 )
-        return rc;
-
     p2m_lock(p2m);
 
     P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Sep 12 13:59:26 2012 +0100
@@ -566,10 +566,8 @@ static inline void _sh_resync_l1(struct 
         SHADOW_INTERNAL_NAME(sh_resync_l1, 2)(v, gmfn, snpmfn);
     else if ( pg->shadow_flags & SHF_L1_PAE )
         SHADOW_INTERNAL_NAME(sh_resync_l1, 3)(v, gmfn, snpmfn);
-#if CONFIG_PAGING_LEVELS >= 4
     else if ( pg->shadow_flags & SHF_L1_64 )
         SHADOW_INTERNAL_NAME(sh_resync_l1, 4)(v, gmfn, snpmfn);
-#endif
 }
 
 
@@ -872,10 +870,8 @@ static int sh_skip_sync(struct vcpu *v, 
         return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 2)(v, gl1mfn);
     else if ( pg->shadow_flags & SHF_L1_PAE )
         return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 3)(v, gl1mfn);
-#if CONFIG_PAGING_LEVELS >= 4
     else if ( pg->shadow_flags & SHF_L1_64 )
         return SHADOW_INTERNAL_NAME(sh_safe_not_to_sync, 4)(v, gl1mfn);
-#endif
     SHADOW_ERROR("gmfn 0x%lx was OOS but not shadowed as an l1.\n", 
                  mfn_x(gl1mfn));
     BUG();
@@ -1083,7 +1079,6 @@ sh_validate_guest_entry(struct vcpu *v, 
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3)
             (v, gmfn, entry, size);
 
-#if CONFIG_PAGING_LEVELS >= 4 
     if ( page->shadow_flags & SHF_L1_64 ) 
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4)
             (v, gmfn, entry, size);
@@ -1099,10 +1094,7 @@ sh_validate_guest_entry(struct vcpu *v, 
     if ( page->shadow_flags & SHF_L4_64 ) 
         result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4)
             (v, gmfn, entry, size);
-#else /* 32-bit hypervisor does not support 64-bit guests */
-    ASSERT((page->shadow_flags 
-            & (SHF_L4_64|SHF_L3_64|SHF_L2H_64|SHF_L2_64|SHF_L1_64)) == 0);
-#endif
+
     this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED)); 
 
     return result;
@@ -1265,11 +1257,9 @@ void shadow_unhook_mappings(struct vcpu 
     case SH_type_l2h_pae_shadow:
         SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(v, smfn, user_only);
         break;
-#if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l4_64_shadow:
         SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(v, smfn, user_only);
         break;
-#endif
     default:
         SHADOW_ERROR("top-level shadow has bad type %08x\n", sp->u.sh.type);
         BUG();
@@ -1647,51 +1637,6 @@ shadow_free_p2m_page(struct domain *d, s
     paging_unlock(d);
 }
 
-#if CONFIG_PAGING_LEVELS == 3
-static void p2m_install_entry_in_monitors(struct domain *d, 
-                                          l3_pgentry_t *l3e) 
-/* Special case, only used for external-mode domains on PAE hosts:
- * update the mapping of the p2m table.  Once again, this is trivial in
- * other paging modes (one top-level entry points to the top-level p2m,
- * no maintenance needed), but PAE makes life difficult by needing a
- * copy the eight l3es of the p2m table in eight l2h slots in the
- * monitor table.  This function makes fresh copies when a p2m l3e
- * changes. */
-{
-    l2_pgentry_t *ml2e;
-    struct vcpu *v;
-    unsigned int index;
-
-    index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
-    ASSERT(index < MACHPHYS_MBYTES>>1);
-
-    for_each_vcpu(d, v) 
-    {
-        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 ) 
-            continue;
-        ASSERT(shadow_mode_external(v->domain));
-
-        SHADOW_DEBUG(P2M, "d=%u v=%u index=%u mfn=%#lx\n",
-                      d->domain_id, v->vcpu_id, index, l3e_get_pfn(*l3e));
-
-        if ( v == current ) /* OK to use linear map of monitor_table */
-            ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
-        else 
-        {
-            l3_pgentry_t *ml3e;
-            ml3e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
-            ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
-            ml2e = sh_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
-            ml2e += l2_table_offset(RO_MPT_VIRT_START);
-            sh_unmap_domain_page(ml3e);
-        }
-        ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
-        if ( v != current )
-            sh_unmap_domain_page(ml2e);
-    }
-}
-#endif
-
 /* Set the pool of shadow pages to the required number of pages.
  * Input will be rounded up to at least shadow_min_acceptable_pages(),
  * plus space for the p2m table.
@@ -2141,7 +2086,6 @@ void sh_destroy_shadow(struct vcpu *v, m
         SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(v, smfn);
         break;
 
-#if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l1_64_shadow:
     case SH_type_fl1_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4)(v, smfn);
@@ -2158,7 +2102,7 @@ void sh_destroy_shadow(struct vcpu *v, m
     case SH_type_l4_64_shadow:
         SHADOW_INTERNAL_NAME(sh_destroy_l4_shadow, 4)(v, smfn);
         break;
-#endif
+
     default:
         SHADOW_ERROR("tried to destroy shadow of bad type %08lx\n",
                      (unsigned long)t);
@@ -2197,13 +2141,8 @@ int sh_remove_write_access(struct vcpu *
         SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */
         NULL, /* l2_pae  */
         NULL, /* l2h_pae */
-#if CONFIG_PAGING_LEVELS >= 4
         SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64   */
         SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64  */
-#else
-        NULL, /* l1_64   */
-        NULL, /* fl1_64  */
-#endif
         NULL, /* l2_64   */
         NULL, /* l2h_64  */
         NULL, /* l3_64   */
@@ -2310,7 +2249,6 @@ int sh_remove_write_access(struct vcpu *
                           + ((fault_addr & VADDR_MASK) >> 18), 6); break;
             }
         }
-#if CONFIG_PAGING_LEVELS >= 4
         else if ( v->arch.paging.mode->guest_levels == 4 )
         {
             /* 64bit w2k3: linear map at 0xfffff68000000000 */
@@ -2351,7 +2289,6 @@ int sh_remove_write_access(struct vcpu *
              /* FreeBSD 64bit: direct map at 0xffffff0000000000 */
              GUESS(0xffffff0000000000 + (gfn << PAGE_SHIFT), 6);
         }
-#endif /* CONFIG_PAGING_LEVELS >= 4 */
 
 #undef GUESS
     }
@@ -2423,18 +2360,14 @@ int sh_remove_write_access_from_sl1p(str
         return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,2)
             (v, gmfn, smfn, off);
     }
-#if CONFIG_PAGING_LEVELS >= 3
     else if ( sp->u.sh.type == SH_type_l1_pae_shadow
               || sp->u.sh.type == SH_type_fl1_pae_shadow )
         return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,3)
             (v, gmfn, smfn, off);
-#if CONFIG_PAGING_LEVELS >= 4
     else if ( sp->u.sh.type == SH_type_l1_64_shadow
               || sp->u.sh.type == SH_type_fl1_64_shadow )
         return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,4)
             (v, gmfn, smfn, off);
-#endif
-#endif
 
     return 0;
 }
@@ -2458,13 +2391,8 @@ int sh_remove_all_mappings(struct vcpu *
         SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */
         NULL, /* l2_pae  */
         NULL, /* l2h_pae */
-#if CONFIG_PAGING_LEVELS >= 4
         SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64   */
         SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64  */
-#else
-        NULL, /* l1_64   */
-        NULL, /* fl1_64  */
-#endif
         NULL, /* l2_64   */
         NULL, /* l2h_64  */
         NULL, /* l3_64   */
@@ -2560,7 +2488,6 @@ static int sh_remove_shadow_via_pointer(
     case SH_type_l2h_pae_shadow:
         SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 3)(v, vaddr, pmfn);
         break;
-#if CONFIG_PAGING_LEVELS >= 4
     case SH_type_l1_64_shadow:
     case SH_type_l2_64_shadow:
     case SH_type_l2h_64_shadow:
@@ -2568,7 +2495,6 @@ static int sh_remove_shadow_via_pointer(
     case SH_type_l4_64_shadow:
         SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 4)(v, vaddr, pmfn);
         break;
-#endif
     default: BUG(); /* Some wierd unknown shadow type */
     }
     
@@ -2607,17 +2533,10 @@ void sh_remove_shadows(struct vcpu *v, m
         SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2h_pae */
         NULL, /* l1_64   */
         NULL, /* fl1_64  */
-#if CONFIG_PAGING_LEVELS >= 4
         SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64   */
         SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2h_64  */
         SHADOW_INTERNAL_NAME(sh_remove_l2_shadow, 4), /* l3_64   */
         SHADOW_INTERNAL_NAME(sh_remove_l3_shadow, 4), /* l4_64   */
-#else
-        NULL, /* l2_64   */
-        NULL, /* l2h_64  */
-        NULL, /* l3_64   */
-        NULL, /* l4_64   */
-#endif
         NULL, /* p2m     */
         NULL  /* unused  */
     };
@@ -2697,13 +2616,11 @@ void sh_remove_shadows(struct vcpu *v, m
     DO_UNSHADOW(SH_type_l2h_pae_shadow);
     DO_UNSHADOW(SH_type_l2_pae_shadow);
     DO_UNSHADOW(SH_type_l1_pae_shadow);
-#if CONFIG_PAGING_LEVELS >= 4
     DO_UNSHADOW(SH_type_l4_64_shadow);
     DO_UNSHADOW(SH_type_l3_64_shadow);
     DO_UNSHADOW(SH_type_l2h_64_shadow);
     DO_UNSHADOW(SH_type_l2_64_shadow);
     DO_UNSHADOW(SH_type_l1_64_shadow);
-#endif
 
 #undef DO_UNSHADOW
 
@@ -2741,7 +2658,6 @@ sh_remove_all_shadows_and_parents(struct
 
 /**************************************************************************/
 
-#if CONFIG_PAGING_LEVELS >= 4
 /* Reset the up-pointers of every L3 shadow to 0. 
  * This is called when l3 shadows stop being pinnable, to clear out all
  * the list-head bits so the up-pointer field is properly inititalised. */
@@ -2750,7 +2666,6 @@ static int sh_clear_up_pointer(struct vc
     mfn_to_page(smfn)->up = 0;
     return 0;
 }
-#endif
 
 void sh_reset_l3_up_pointers(struct vcpu *v)
 {
@@ -2767,11 +2682,7 @@ void sh_reset_l3_up_pointers(struct vcpu
         NULL, /* fl1_64  */
         NULL, /* l2_64   */
         NULL, /* l2h_64  */
-#if CONFIG_PAGING_LEVELS >= 4
         sh_clear_up_pointer, /* l3_64   */
-#else
-        NULL, /* l3_64   */
-#endif
         NULL, /* l4_64   */
         NULL, /* p2m     */
         NULL  /* unused  */
@@ -2838,11 +2749,7 @@ static void sh_update_paging_modes(struc
         ///
         /// PV guest
         ///
-#if CONFIG_PAGING_LEVELS == 4
         v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
-#else /* CONFIG_PAGING_LEVELS == 3 */
-        v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
-#endif
     }
     else
     {
@@ -3325,11 +3232,9 @@ static int shadow_one_bit_disable(struct
         {
             if ( v->arch.paging.mode )
                 v->arch.paging.mode->shadow.detach_old_tables(v);
-#if CONFIG_PAGING_LEVELS == 4
             if ( !(v->arch.flags & TF_kernel_mode) )
                 make_cr3(v, pagetable_get_pfn(v->arch.guest_table_user));
             else
-#endif
                 make_cr3(v, pagetable_get_pfn(v->arch.guest_table));
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) 
@@ -3481,14 +3386,6 @@ shadow_write_p2m_entry(struct vcpu *v, u
     /* Update the entry with new content */
     safe_write_pte(p, new);
 
-    /* install P2M in monitors for PAE Xen */
-#if CONFIG_PAGING_LEVELS == 3
-    if ( level == 3 )
-        /* We have written to the p2m l3: need to sync the per-vcpu
-         * copies of it in the monitor tables */
-        p2m_install_entry_in_monitors(d, (l3_pgentry_t *)p);
-#endif
-
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
     /* If we're doing FAST_FAULT_PATH, then shadow mode may have
        cached the fact that this is an mmio region in the shadow
@@ -3815,14 +3712,12 @@ void shadow_audit_tables(struct vcpu *v)
         SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3), /* fl1_pae */
         SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),  /* l2_pae  */
         SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3),  /* l2h_pae */
-#if CONFIG_PAGING_LEVELS >= 4
         SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4),  /* l1_64   */
         SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4), /* fl1_64  */
         SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),  /* l2_64   */
         SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4),  /* l2h_64   */
         SHADOW_INTERNAL_NAME(sh_audit_l3_table, 4),  /* l3_64   */
         SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),  /* l4_64   */
-#endif /* CONFIG_PAGING_LEVELS >= 4 */
         NULL  /* All the rest */
     };
     unsigned int mask; 
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Sep 12 13:59:26 2012 +0100
@@ -788,22 +788,10 @@ static inline void safe_write_entry(void
     volatile unsigned long *d = dst;
     unsigned long *s = src;
     ASSERT(!((unsigned long) d & (sizeof (shadow_l1e_t) - 1)));
-#if CONFIG_PAGING_LEVELS == 3
-    /* In PAE mode, pagetable entries are larger
-     * than machine words, so won't get written atomically.  We need to make
-     * sure any other cpu running on these shadows doesn't see a
-     * half-written entry.  Do this by marking the entry not-present first,
-     * then writing the high word before the low word. */
-    BUILD_BUG_ON(sizeof (shadow_l1e_t) != 2 * sizeof (unsigned long));
-    d[0] = 0;
-    d[1] = s[1];
-    d[0] = s[0];
-#else
     /* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
      * which will be an atomic write, since the entry is aligned. */
     BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long));
     *d = *s;
-#endif
 }
 
 
@@ -1444,7 +1432,7 @@ do {                                    
 //        probably wants to wait until the shadow types have been moved from
 //        shadow-types.h to shadow-private.h
 //
-#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
+#if GUEST_PAGING_LEVELS == 4
 void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
 {
     struct domain *d = v->domain;
@@ -1496,7 +1484,7 @@ void sh_install_xen_entries_in_l4(struct
 }
 #endif
 
-#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
+#if GUEST_PAGING_LEVELS >= 3
 // For 3-on-3 PV guests, we need to make sure the xen mappings are in
 // place, which means that we need to populate the l2h entry in the l3
 // table.
@@ -1505,62 +1493,13 @@ static void sh_install_xen_entries_in_l2
 {
     struct domain *d = v->domain;
     shadow_l2e_t *sl2e;
-#if CONFIG_PAGING_LEVELS == 3
-    int i;
-#else
 
     if ( !is_pv_32on64_vcpu(v) )
         return;
-#endif
 
     sl2e = sh_map_domain_page(sl2hmfn);
     ASSERT(sl2e != NULL);
     ASSERT(sizeof (l2_pgentry_t) == sizeof (shadow_l2e_t));
-    
-#if CONFIG_PAGING_LEVELS == 3
-
-    /* Copy the common Xen mappings from the idle domain */
-    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
-           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
-           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-
-    /* Install the per-domain mappings for this domain */
-    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
-        sl2e[shadow_l2_table_offset(PERDOMAIN_VIRT_START) + i] =
-            shadow_l2e_from_mfn(
-                page_to_mfn(perdomain_pt_page(d, i)),
-                __PAGE_HYPERVISOR);
-    
-    /* We don't set up a linear mapping here because we can't until this
-     * l2h is installed in an l3e.  sh_update_linear_entries() handles
-     * the linear mappings when CR3 (and so the fourth l3e) is loaded.  
-     * We zero them here, just as a safety measure.
-     */
-    for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
-        sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START) + i] =
-            shadow_l2e_empty();
-    for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
-        sl2e[shadow_l2_table_offset(SH_LINEAR_PT_VIRT_START) + i] =
-            shadow_l2e_empty();
-
-    if ( shadow_mode_translate(d) )
-    {
-        /* Install the domain-specific p2m table */
-        l3_pgentry_t *p2m;
-        ASSERT(pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0);
-        p2m = sh_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))));
-        for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
-        {
-            sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START) + i] =
-                (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
-                ? shadow_l2e_from_mfn(_mfn(l3e_get_pfn(p2m[i])),
-                                      __PAGE_HYPERVISOR)
-                : shadow_l2e_empty();
-        }
-        sh_unmap_domain_page(p2m);
-    }
-
-#else
 
     /* Copy the common Xen mappings from the idle domain */
     memcpy(
@@ -1568,16 +1507,11 @@ static void sh_install_xen_entries_in_l2
         &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
         COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));
 
-#endif
-    
     sh_unmap_domain_page(sl2e);
 }
 #endif
 
 
-
-
-
 /**************************************************************************/
 /* Create a shadow of a given guest page.
  */
@@ -1633,11 +1567,11 @@ sh_make_shadow(struct vcpu *v, mfn_t gmf
     {
         switch (shadow_type) 
         {
-#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
+#if GUEST_PAGING_LEVELS == 4
         case SH_type_l4_shadow:
             sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
 #endif
-#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
+#if GUEST_PAGING_LEVELS >= 3
         case SH_type_l2h_shadow:
             sh_install_xen_entries_in_l2h(v, smfn); break;
 #endif
@@ -1677,7 +1611,6 @@ sh_make_monitor_table(struct vcpu *v)
     /* Guarantee we can get the memory we need */
     shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
 
-#if CONFIG_PAGING_LEVELS == 4    
     {
         mfn_t m4mfn;
         m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
@@ -1726,43 +1659,6 @@ sh_make_monitor_table(struct vcpu *v)
 #endif /* SHADOW_PAGING_LEVELS < 4 */
         return m4mfn;
     }
-
-#elif CONFIG_PAGING_LEVELS == 3
-
-    {
-        mfn_t m3mfn, m2mfn; 
-        l3_pgentry_t *l3e;
-        l2_pgentry_t *l2e;
-        int i;
-
-        m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
-        /* Remember the level of this table */
-        mfn_to_page(m3mfn)->shadow_flags = 3;
-
-        // Install a monitor l2 table in slot 3 of the l3 table.
-        // This is used for all Xen entries, including linear maps
-        m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
-        mfn_to_page(m2mfn)->shadow_flags = 2;
-        l3e = sh_map_domain_page(m3mfn);
-        l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
-        sh_install_xen_entries_in_l2h(v, m2mfn);
-        /* Install the monitor's own linear map */
-        l2e = sh_map_domain_page(m2mfn);
-        for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
-            l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
-                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT) 
-                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR) 
-                : l2e_empty();
-        sh_unmap_domain_page(l2e);
-        sh_unmap_domain_page(l3e);
-
-        SHADOW_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
-        return m3mfn;
-    }
-
-#else
-#error this should not happen
-#endif /* CONFIG_PAGING_LEVELS */
 }
 #endif /* SHADOW_PAGING_LEVELS == GUEST_PAGING_LEVELS */
 
@@ -2146,7 +2042,7 @@ void sh_destroy_monitor_table(struct vcp
     struct domain *d = v->domain;
     ASSERT(mfn_to_page(mmfn)->u.sh.type == SH_type_monitor_table);
 
-#if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS != 4)
+#if SHADOW_PAGING_LEVELS != 4
     {
         mfn_t m3mfn;
         l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
@@ -2177,14 +2073,6 @@ void sh_destroy_monitor_table(struct vcp
         }
         sh_unmap_domain_page(l4e);
     }
-#elif CONFIG_PAGING_LEVELS == 3
-    /* Need to destroy the l2 monitor page in slot 4 too */
-    {
-        l3_pgentry_t *l3e = sh_map_domain_page(mmfn);
-        ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
-        shadow_free(d, _mfn(l3e_get_pfn(l3e[3])));
-        sh_unmap_domain_page(l3e);
-    }
 #endif
 
     /* Put the memory back in the pool */
@@ -2382,46 +2270,6 @@ static int validate_gl2e(struct vcpu *v,
     }
     l2e_propagate_from_guest(v, new_gl2e, sl1mfn, &new_sl2e, ft_prefetch);
 
-    // check for updates to xen reserved slots in PV guests...
-    // XXX -- need to revisit this for PV 3-on-4 guests.
-    //
-#if SHADOW_PAGING_LEVELS < 4
-#if CONFIG_PAGING_LEVELS == SHADOW_PAGING_LEVELS
-    if ( !shadow_mode_external(v->domain) )
-    {
-        int shadow_index = (((unsigned long)sl2p & ~PAGE_MASK) /
-                            sizeof(shadow_l2e_t));
-        int reserved_xen_slot;
-
-#if SHADOW_PAGING_LEVELS == 3
-        reserved_xen_slot = 
-            ((mfn_to_page(sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow) &&
-             (shadow_index 
-              >= (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1))));
-#else /* SHADOW_PAGING_LEVELS == 2 */
-        reserved_xen_slot = (shadow_index >= L2_PAGETABLE_FIRST_XEN_SLOT);
-#endif
-
-        if ( unlikely(reserved_xen_slot) )
-        {
-            // attempt by the guest to write to a xen reserved slot
-            //
-            SHADOW_PRINTK("%s out-of-range update "
-                           "sl2mfn=%05lx index=0x%x val=%" SH_PRI_pte "\n",
-                           __func__, mfn_x(sl2mfn), shadow_index, new_sl2e.l2);
-            if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
-            {
-                SHADOW_ERROR("out-of-range l2e update\n");
-                result |= SHADOW_SET_ERROR;
-            }
-
-            // do not call shadow_set_l2e...
-            return result;
-        }
-    }
-#endif /* CONFIG_PAGING_LEVELS == SHADOW_PAGING_LEVELS */
-#endif /* SHADOW_PAGING_LEVELS < 4 */
-
     result |= shadow_set_l2e(v, sl2p, new_sl2e, sl2mfn);
 
     return result;
@@ -3836,7 +3684,7 @@ sh_update_linear_entries(struct vcpu *v)
          && pagetable_get_pfn(v->arch.monitor_table) == 0 ) 
         return;
 
-#if (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS == 4)
+#if SHADOW_PAGING_LEVELS == 4
     
     /* For PV, one l4e points at the guest l4, one points at the shadow
      * l4.  No maintenance required. 
@@ -3862,7 +3710,7 @@ sh_update_linear_entries(struct vcpu *v)
         }
     }
 
-#elif (CONFIG_PAGING_LEVELS == 4) && (SHADOW_PAGING_LEVELS == 3)
+#elif SHADOW_PAGING_LEVELS == 3
 
     /* PV: XXX
      *
@@ -3923,102 +3771,6 @@ sh_update_linear_entries(struct vcpu *v)
     else
         domain_crash(d); /* XXX */
 
-#elif CONFIG_PAGING_LEVELS == 3
-
-    /* PV: need to copy the guest's l3 entries into the guest-linear-map l2
-     * entries in the shadow, and the shadow's l3 entries into the 
-     * shadow-linear-map l2 entries in the shadow.  This is safe to do 
-     * because Xen does not let guests share high-slot l2 tables between l3s,
-     * so we know we're not treading on anyone's toes. 
-     *
-     * HVM: need to copy the shadow's l3 entries into the
-     * shadow-linear-map l2 entries in the monitor table.  This is safe
-     * because we have one monitor table for each vcpu.  The monitor's
-     * own l3es don't need to be copied because they never change.  
-     * XXX That might change if we start stuffing things into the rest
-     * of the monitor's virtual address space. 
-     */ 
-    {
-        l2_pgentry_t *l2e, new_l2e;
-        shadow_l3e_t *guest_l3e = NULL, *shadow_l3e;
-        int i;
-        int unmap_l2e = 0;
-
-#if GUEST_PAGING_LEVELS == 2
-
-        /* Shadow l3 tables were built by sh_update_cr3 */
-        BUG_ON(!shadow_mode_external(d)); /* PV 2-on-3 is unsupported */
-        shadow_l3e = (shadow_l3e_t *)&v->arch.paging.shadow.l3table;
-        
-#else /* GUEST_PAGING_LEVELS == 3 */
-        
-        shadow_l3e = (shadow_l3e_t *)&v->arch.paging.shadow.l3table;
-        guest_l3e = (guest_l3e_t *)&v->arch.paging.shadow.gl3e;
-
-#endif /* GUEST_PAGING_LEVELS */
-        
-        /* Choose where to write the entries, using linear maps if possible */
-        if ( shadow_mode_external(d) )
-        {
-            if ( v == current )
-            {
-                /* From the monitor tables, it's safe to use linear maps
-                 * to update monitor l2s */
-                l2e = __linear_l2_table + (3 * L2_PAGETABLE_ENTRIES);
-            }
-            else
-            {
-                /* Map the monitor table's high l2 */
-                l3_pgentry_t *l3e;
-                l3e = sh_map_domain_page(
-                    pagetable_get_mfn(v->arch.monitor_table));
-                ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
-                l2e = sh_map_domain_page(_mfn(l3e_get_pfn(l3e[3])));
-                unmap_l2e = 1;
-                sh_unmap_domain_page(l3e);
-            }
-        }
-        else 
-        {
-            /* Map the shadow table's high l2 */
-            ASSERT(shadow_l3e_get_flags(shadow_l3e[3]) & _PAGE_PRESENT);
-            l2e = sh_map_domain_page(shadow_l3e_get_mfn(shadow_l3e[3]));
-            unmap_l2e = 1;
-        }
-        
-        /* Write linear mapping of guest (only in PV, and only when 
-         * not translated). */
-        if ( !shadow_mode_translate(d) )
-        {
-            for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
-            {
-                new_l2e = 
-                    ((shadow_l3e_get_flags(guest_l3e[i]) & _PAGE_PRESENT)
-                     ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(guest_l3e[i])),
-                                    __PAGE_HYPERVISOR) 
-                     : l2e_empty());
-                safe_write_entry(
-                    &l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i],
-                    &new_l2e);
-            }
-        }
-        
-        /* Write linear mapping of shadow. */
-        for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
-        {
-            new_l2e = (shadow_l3e_get_flags(shadow_l3e[i]) & _PAGE_PRESENT) 
-                ? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(shadow_l3e[i])),
-                               __PAGE_HYPERVISOR) 
-                : l2e_empty();
-            safe_write_entry(
-                &l2e[l2_table_offset(SH_LINEAR_PT_VIRT_START) + i],
-                &new_l2e);
-        }
-        
-        if ( unmap_l2e )
-            sh_unmap_domain_page(l2e);
-    }
-
 #else
 #error this should not happen
 #endif
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Wed Sep 12 13:59:26 2012 +0100
@@ -165,11 +165,9 @@ extern void shadow_audit_tables(struct v
 #include "multi.h"
 #undef GUEST_LEVELS
 
-#if CONFIG_PAGING_LEVELS == 4
 #define GUEST_LEVELS  4
 #include "multi.h"
 #undef GUEST_LEVELS
-#endif /* CONFIG_PAGING_LEVELS == 4 */
 
 /* Shadow type codes */
 #define SH_type_none           (0U) /* on the shadow free list */
@@ -214,7 +212,6 @@ static inline int sh_type_is_pinnable(st
      * shadows so they don't just evaporate on every context switch.
      * For all other guests, we'd rather use the up-pointer field in l3s. */ 
     if ( unlikely((v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) 
-                  && CONFIG_PAGING_LEVELS >= 4
                   && t == SH_type_l3_64_shadow) )
         return 1;
 #endif
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/trace.c
--- a/xen/arch/x86/trace.c      Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/trace.c      Wed Sep 12 13:59:26 2012 +0100
@@ -185,8 +185,7 @@ void __trace_ptwr_emulation(unsigned lon
         d.eip = eip;
         d.pte = npte;
 
-        event = ((CONFIG_PAGING_LEVELS == 3) ?
-                 TRC_PV_PTWR_EMULATION_PAE : TRC_PV_PTWR_EMULATION);
+        event = TRC_PV_PTWR_EMULATION;
         event |= TRC_64_FLAG;
         __trace_var(event, 1/*tsc*/, sizeof(d), &d);
     }
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/arch/x86/traps.c      Wed Sep 12 13:59:26 2012 +0100
@@ -1181,12 +1181,8 @@ static enum pf_type __page_fault_type(
     unsigned long addr, unsigned int error_code)
 {
     unsigned long mfn, cr3 = read_cr3();
-#if CONFIG_PAGING_LEVELS >= 4
     l4_pgentry_t l4e, *l4t;
-#endif
-#if CONFIG_PAGING_LEVELS >= 3
     l3_pgentry_t l3e, *l3t;
-#endif
     l2_pgentry_t l2e, *l2t;
     l1_pgentry_t l1e, *l1t;
     unsigned int required_flags, disallowed_flags, page_user;
@@ -1217,7 +1213,6 @@ static enum pf_type __page_fault_type(
 
     mfn = cr3 >> PAGE_SHIFT;
 
-#if CONFIG_PAGING_LEVELS >= 4
     l4t = map_domain_page(mfn);
     l4e = l4e_read_atomic(&l4t[l4_table_offset(addr)]);
     mfn = l4e_get_pfn(l4e);
@@ -1226,28 +1221,17 @@ static enum pf_type __page_fault_type(
          (l4e_get_flags(l4e) & disallowed_flags) )
         return real_fault;
     page_user &= l4e_get_flags(l4e);
-#endif
-
-#if CONFIG_PAGING_LEVELS >= 3
+
     l3t  = map_domain_page(mfn);
-#if CONFIG_PAGING_LEVELS == 3
-    l3t += (cr3 & 0xFE0UL) >> 3;
-#endif
     l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]);
     mfn = l3e_get_pfn(l3e);
     unmap_domain_page(l3t);
-#if CONFIG_PAGING_LEVELS == 3
-    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
-        return real_fault;
-#else
     if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
          (l3e_get_flags(l3e) & disallowed_flags) )
         return real_fault;
     page_user &= l3e_get_flags(l3e);
     if ( l3e_get_flags(l3e) & _PAGE_PSE )
         goto leaf;
-#endif
-#endif
 
     l2t = map_domain_page(mfn);
     l2e = l2e_read_atomic(&l2t[l2_table_offset(addr)]);
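
With the 3-level special cases gone, __page_fault_type() always walks
CR3 -> L4 -> L3 -> L2 -> L1, accumulating _PAGE_USER from every level and
stopping early at a PSE superpage. A simplified, self-contained sketch of
that walk (the constants and map_table() helper below are stand-ins, not
Xen's map_domain_page()/lNe_* accessors):

    #include <stdbool.h>
    #include <stdint.h>

    #define PRESENT   0x001ULL
    #define USER      0x004ULL
    #define PSE       0x080ULL
    #define ADDR_MASK 0x000ffffffffff000ULL

    typedef uint64_t pte_t;

    pte_t *map_table(uint64_t mfn);   /* hypothetical: returns the 512-entry table */

    static bool mapping_is_user(uint64_t cr3, uint64_t va)
    {
        uint64_t mfn  = cr3 >> 12;
        uint64_t user = USER;

        for ( int level = 4; level >= 1; level-- )
        {
            pte_t e = map_table(mfn)[(va >> (12 + 9 * (level - 1))) & 0x1ff];

            if ( !(e & PRESENT) )
                return false;                  /* real fault */
            user &= e;                         /* accumulate _PAGE_USER */
            if ( (level == 3 || level == 2) && (e & PSE) )
                break;                         /* 1G/2M superpage leaf */
            mfn = (e & ADDR_MASK) >> 12;
        }
        return user != 0;
    }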
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/include/asm-x86/domain.h      Wed Sep 12 13:59:26 2012 +0100
@@ -119,12 +119,10 @@ struct shadow_domain {
 };
 
 struct shadow_vcpu {
-#if CONFIG_PAGING_LEVELS >= 3
     /* PAE guests: per-vcpu shadow top-level table */
     l3_pgentry_t l3table[4] __attribute__((__aligned__(32)));
     /* PAE guests: per-vcpu cache of the top-level *guest* entries */
     l3_pgentry_t gl3e[4] __attribute__((__aligned__(32)));
-#endif
     /* Non-PAE guests: pointer to guest top-level pagetable */
     void *guest_vtable;
     /* Last MFN that we emulated a write to as unshadow heuristics. */
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Wed Sep 12 13:59:26 2012 +0100
@@ -490,9 +490,6 @@ struct vmcb_struct {
 } __attribute__ ((packed));
 
 struct svm_domain {
-#if CONFIG_PAGING_LEVELS == 3
-    bool_t npt_4gb_warning;
-#endif
 };
 
 struct arch_svm_struct {
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/include/asm-x86/mm.h  Wed Sep 12 13:59:26 2012 +0100
@@ -398,16 +398,6 @@ static inline int get_page_and_type(stru
     ASSERT(((_p)->count_info & PGC_count_mask) != 0);          \
     ASSERT(page_get_owner(_p) == (_d))
 
-// Quick test for whether a given page can be represented directly in CR3.
-//
-#if CONFIG_PAGING_LEVELS == 3
-#define MFN_FITS_IN_CR3(_MFN) !(mfn_x(_MFN) >> 20)
-
-/* returns a lowmem machine address of the copied L3 root table */
-unsigned long
-pae_copy_root(struct vcpu *v, l3_pgentry_t *l3tab);
-#endif /* CONFIG_PAGING_LEVELS == 3 */
-
 int check_descriptor(const struct domain *, struct desc_struct *d);
 
 extern bool_t opt_allow_superpage;
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/include/asm-x86/p2m.h Wed Sep 12 13:59:26 2012 +0100
@@ -603,30 +603,6 @@ int p2m_get_mem_access(struct domain *d,
 struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
-#if CONFIG_PAGING_LEVELS == 3
-static inline int p2m_gfn_check_limit(
-    struct domain *d, unsigned long gfn, unsigned int order)
-{
-    /*
-     * 32bit AMD nested paging does not support over 4GB guest due to 
-     * hardware translation limit. This limitation is checked by comparing
-     * gfn with 0xfffffUL.
-     */
-    if ( !hap_enabled(d) || ((gfn + (1ul << order)) <= 0x100000UL) ||
-         (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) )
-        return 0;
-
-    if ( !test_and_set_bool(d->arch.hvm_domain.svm.npt_4gb_warning) )
-        dprintk(XENLOG_WARNING, "Dom%d failed to populate memory beyond"
-                " 4GB: specify 'hap=0' domain config option.\n",
-                d->domain_id);
-
-    return -EINVAL;
-}
-#else
-#define p2m_gfn_check_limit(d, g, o) 0
-#endif
-
 /* Directly set a p2m entry: only for use by p2m code. Does not need
  * a call to put_gfn afterwards/ */
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/include/asm-x86/page.h        Wed Sep 12 13:59:26 2012 +0100
@@ -126,13 +126,11 @@ static inline l3_pgentry_t l3e_from_padd
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l3_pgentry_t) { pa | put_pte_flags(flags) };
 }
-#if CONFIG_PAGING_LEVELS >= 4
 static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 {
     ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
     return (l4_pgentry_t) { pa | put_pte_flags(flags) };
 }
-#endif
 #endif /* !__ASSEMBLY__ */
 
 /* Construct a pte from its direct integer representation. */
@@ -191,13 +189,7 @@ static inline l4_pgentry_t l4e_from_padd
 #ifndef __ASSEMBLY__
 
 /* Page-table type. */
-#if CONFIG_PAGING_LEVELS == 3
-/* x86_32 PAE */
-typedef struct { u32 pfn; } pagetable_t;
-#elif CONFIG_PAGING_LEVELS == 4
-/* x86_64 */
 typedef struct { u64 pfn; } pagetable_t;
-#endif
 #define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
 #define pagetable_get_page(x)   mfn_to_page((x).pfn)
 #define pagetable_get_pfn(x)    ((x).pfn)
@@ -293,16 +285,11 @@ void copy_page_sse2(void *, const void *
 
 #ifndef __ASSEMBLY__
 extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
-#if CONFIG_PAGING_LEVELS == 3
-extern l2_pgentry_t   idle_pg_table_l2[
-    ROOT_PAGETABLE_ENTRIES * L2_PAGETABLE_ENTRIES];
-#elif CONFIG_PAGING_LEVELS == 4
 extern l2_pgentry_t  *compat_idle_pg_table_l2;
 extern unsigned int   m2p_compat_vstart;
 extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES],
     l2_bootmap[L2_PAGETABLE_ENTRIES];
 extern l3_pgentry_t l3_bootmap[L3_PAGETABLE_ENTRIES];
-#endif
 extern l2_pgentry_t l2_identmap[4*L2_PAGETABLE_ENTRIES];
 extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES],
     l1_fixmap[L1_PAGETABLE_ENTRIES];
diff -r bc8cb4778702 -r 7b658d31b5e1 xen/include/asm-x86/paging.h
--- a/xen/include/asm-x86/paging.h      Wed Sep 12 13:29:30 2012 +0100
+++ b/xen/include/asm-x86/paging.h      Wed Sep 12 13:59:26 2012 +0100
@@ -327,17 +327,7 @@ static inline int paging_cmpxchg_guest_e
  * never sees a half-written entry that has _PAGE_PRESENT set */
 static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new)
 {
-#if CONFIG_PAGING_LEVELS == 3
-    /* PAE machines write 64bit PTEs as two 32bit writes. */
-    volatile unsigned long *d = (unsigned long *) p;
-    unsigned long *s = (unsigned long *) &new;
-    BUILD_BUG_ON(sizeof (l1_pgentry_t) != 2 * sizeof (unsigned long));
-    d[0] = 0;
-    d[1] = s[1];
-    d[0] = s[0]; 
-#else
     *p = new;
-#endif
 }
 
 /* Atomically write a P2M entry and update the paging-assistance state 
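
The paging.h hunk above drops the PAE-only two-halves sequence: with
32-bit writes a 64-bit PTE could briefly be seen half-written, so the old
code cleared the low word (removing _PAGE_PRESENT) before filling in the
new value. On x86-64 a naturally aligned 8-byte store is a single
instruction, so plain assignment is enough. The same guarantee, spelled
with an explicit relaxed atomic store purely for illustration (not how
Xen writes it):

    #include <stdint.h>

    static inline void write_pte64(volatile uint64_t *p, uint64_t new_pte)
    {
        /* Compiles to one movq on x86-64; no intermediate state with
         * _PAGE_PRESENT set can be observed by another CPU. */
        __atomic_store_n(p, new_pte, __ATOMIC_RELAXED);
    }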

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog