[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86/mm: remove the linear mapping of the p2m tables.


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Fri, 14 Sep 2012 10:55:39 +0000
  • Delivery-date: Fri, 14 Sep 2012 10:55:45 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1347550893 -3600
# Node ID 8bb4bc335ebbbeaa31960cc7d9bd3e8c49660f86
# Parent  5691e4cc17da7fe8664a67f1d07c3755c0ca34ed
x86/mm: remove the linear mapping of the p2m tables.

Mapping the p2m into the monitor tables was an important optimization
on 32-bit builds, where it avoided mapping and unmapping p2m pages
during a walk.  On 64-bit it makes no difference -- see
http://old-list-archives.xen.org/archives/html/xen-devel/2010-04/msg00981.html
Get rid of it, and use the explicit walk for all lookups.

Signed-off-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r 5691e4cc17da -r 8bb4bc335ebb xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/arch/x86/mm/hap/hap.c Thu Sep 13 16:41:33 2012 +0100
@@ -405,11 +405,6 @@ static void hap_install_xen_entries_in_l
     l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);
 
-    /* Install the domain-specific P2M table */
-    l4e[l4_table_offset(RO_MPT_VIRT_START)] =
-        l4e_from_pfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))),
-                     __PAGE_HYPERVISOR);
-
     hap_unmap_domain_page(l4e);
 }
 
diff -r 5691e4cc17da -r 8bb4bc335ebb xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/arch/x86/mm/p2m-pt.c  Thu Sep 13 16:41:33 2012 +0100
@@ -460,186 +460,6 @@ out:
     return rv;
 }
 
-
-/* Read the current domain's p2m table (through the linear mapping). */
-static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m, 
-                                    unsigned long gfn, p2m_type_t *t, 
-                                    p2m_access_t *a, p2m_query_t q,
-                                    unsigned int *page_order)
-{
-    mfn_t mfn = _mfn(INVALID_MFN);
-    p2m_type_t p2mt = p2m_mmio_dm;
-    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
-    /* XXX This is for compatibility with the old model, where anything not 
-     * XXX marked as RAM was considered to be emulated MMIO space.
-     * XXX Once we start explicitly registering MMIO regions in the p2m 
-     * XXX we will return p2m_invalid for unmapped gfns */
-
-    l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
-    l2_pgentry_t l2e = l2e_empty();
-    l3_pgentry_t l3e = l3e_empty();
-    int ret;
-
-    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) 
-           / sizeof(l1_pgentry_t));
-
-    /*
-     * Read & process L3
-     */
-    p2m_entry = (l1_pgentry_t *)
-        &__linear_l2_table[l2_linear_offset(RO_MPT_VIRT_START)
-                           + l3_linear_offset(addr)];
-pod_retry_l3:
-    ret = __copy_from_user(&l3e, p2m_entry, sizeof(l3e));
-
-    if ( ret != 0 || !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
-    {
-        if ( (l3e_get_flags(l3e) & _PAGE_PSE) &&
-             (p2m_flags_to_type(l3e_get_flags(l3e)) == p2m_populate_on_demand) )
-        {
-            /* The read has succeeded, so we know that mapping exists */
-            if ( q & P2M_ALLOC )
-            {
-                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
-                    goto pod_retry_l3;
-                p2mt = p2m_invalid;
-                gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
-                goto out;
-            }
-            else
-            {
-                p2mt = p2m_populate_on_demand;
-                goto out;
-            }
-        }
-        goto pod_retry_l2;
-    }
-
-    if ( l3e_get_flags(l3e) & _PAGE_PSE )
-    {
-        p2mt = p2m_flags_to_type(l3e_get_flags(l3e));
-        ASSERT(l3e_get_pfn(l3e) != INVALID_MFN || !p2m_is_ram(p2mt));
-        if (p2m_is_valid(p2mt) )
-            mfn = _mfn(l3e_get_pfn(l3e) + 
-                       l2_table_offset(addr) * L1_PAGETABLE_ENTRIES + 
-                       l1_table_offset(addr));
-        else
-            p2mt = p2m_mmio_dm;
-            
-        if ( page_order )
-            *page_order = PAGE_ORDER_1G;
-        goto out;
-    }
-
-    /*
-     * Read & process L2
-     */
-    p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
-                                   + l2_linear_offset(addr)];
-
-pod_retry_l2:
-    ret = __copy_from_user(&l2e,
-                           p2m_entry,
-                           sizeof(l2e));
-    if ( ret != 0
-         || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
-    {
-        if( (l2e_get_flags(l2e) & _PAGE_PSE)
-            && ( p2m_flags_to_type(l2e_get_flags(l2e))
-                 == p2m_populate_on_demand ) )
-        {
-            /* The read has succeeded, so we know that the mapping
-             * exits at this point.  */
-            if ( q & P2M_ALLOC )
-            {
-                if ( !p2m_pod_demand_populate(p2m, gfn, 
-                                                PAGE_ORDER_2M, q) )
-                    goto pod_retry_l2;
-
-                /* Allocate failed. */
-                p2mt = p2m_invalid;
-                printk("%s: Allocate failed!\n", __func__);
-                goto out;
-            }
-            else
-            {
-                p2mt = p2m_populate_on_demand;
-                goto out;
-            }
-        }
-
-        goto pod_retry_l1;
-    }
-        
-    if (l2e_get_flags(l2e) & _PAGE_PSE)
-    {
-        p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
-        ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
-
-        if ( p2m_is_valid(p2mt) )
-            mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
-        else
-            p2mt = p2m_mmio_dm;
-
-        if ( page_order )
-            *page_order = PAGE_ORDER_2M;
-        goto out;
-    }
-
-    /*
-     * Read and process L1
-     */
-
-    /* Need to __copy_from_user because the p2m is sparse and this
-     * part might not exist */
-pod_retry_l1:
-    p2m_entry = &phys_to_machine_mapping[gfn];
-
-    ret = __copy_from_user(&l1e,
-                           p2m_entry,
-                           sizeof(l1e));
-            
-    if ( ret == 0 ) {
-        unsigned long l1e_mfn = l1e_get_pfn(l1e);
-        p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
-        ASSERT( mfn_valid(_mfn(l1e_mfn)) || !p2m_is_ram(p2mt) ||
-                p2m_is_paging(p2mt) );
-
-        if ( p2mt == p2m_populate_on_demand )
-        {
-            /* The read has succeeded, so we know that the mapping
-             * exits at this point.  */
-            if ( q & P2M_ALLOC )
-            {
-                if ( !p2m_pod_demand_populate(p2m, gfn, 
-                                                PAGE_ORDER_4K, q) )
-                    goto pod_retry_l1;
-
-                /* Allocate failed. */
-                p2mt = p2m_invalid;
-                goto out;
-            }
-            else
-            {
-                p2mt = p2m_populate_on_demand;
-                goto out;
-            }
-        }
-
-        if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
-            mfn = _mfn(l1e_mfn);
-        else 
-            /* XXX see above */
-            p2mt = p2m_mmio_dm;
-    }
-    
-    if ( page_order )
-        *page_order = PAGE_ORDER_4K;
-out:
-    *t = p2mt;
-    return mfn;
-}
-
 static mfn_t
 p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn, 
                p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
@@ -666,10 +486,6 @@ p2m_gfn_to_mfn(struct p2m_domain *p2m, u
         /* This pfn is higher than the highest the p2m map currently holds */
         return _mfn(INVALID_MFN);
 
-    /* Use the fast path with the linear mapping if we can */
-    if ( p2m == p2m_get_hostp2m(current->domain) )
-        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q, page_order);
-
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 
     {
@@ -904,17 +720,10 @@ long p2m_pt_audit_p2m(struct p2m_domain 
 {
     unsigned long entry_count = 0, pmbad = 0;
     unsigned long mfn, gfn, m2pfn;
-    int test_linear;
-    struct domain *d = p2m->domain;
 
     ASSERT(p2m_locked_by_me(p2m));
     ASSERT(pod_locked_by_me(p2m));
 
-    test_linear = ( (d == current->domain)
-                    && !pagetable_is_null(current->arch.monitor_table) );
-    if ( test_linear )
-        flush_tlb_local();
-
     /* Audit part one: walk the domain's p2m table, checking the entries. */
     if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
     {
diff -r 5691e4cc17da -r 8bb4bc335ebb xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Sep 13 16:41:33 2012 +0100
@@ -1472,14 +1472,6 @@ void sh_install_xen_entries_in_l4(struct
             shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
     }
 
-    if ( shadow_mode_translate(v->domain) )
-    {
-        /* install domain-specific P2M table */
-        sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
-            shadow_l4e_from_mfn(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))),
-                                __PAGE_HYPERVISOR);
-    }
-
     sh_unmap_domain_page(sl4e);    
 }
 #endif
diff -r 5691e4cc17da -r 8bb4bc335ebb xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Sep 13 10:23:17 2012 +0200
+++ b/xen/include/asm-x86/p2m.h Thu Sep 13 16:41:33 2012 +0100
@@ -35,22 +35,6 @@
 extern bool_t opt_hap_1gb, opt_hap_2mb;
 
 /*
- * The phys_to_machine_mapping maps guest physical frame numbers 
- * to machine frame numbers.  It only exists for paging_mode_translate 
- * guests. It is organised in page-table format, which:
- *
- * (1) allows us to use it directly as the second pagetable in hardware-
- *     assisted paging and (hopefully) iommu support; and 
- * (2) lets us map it directly into the guest vcpus' virtual address space 
- *     as a linear pagetable, so we can read and write it easily.
- *
- * For (2) we steal the address space that would have normally been used
- * by the read-only MPT map in a non-translated guest.  (For 
- * paging_mode_external() guests this mapping is in the monitor table.)
- */
-#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
-
-/*
  * The upper levels of the p2m pagetable always contain full rights; all 
  * variation in the access control bits is made in the level-1 PTEs.
  * 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.