
[Xen-changelog] [xen master] x86: use linear L1 page table for map_domain_page() page table manipulation



commit 625481082df4d2426a6d5d848edc0828df710022
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu Feb 28 11:10:53 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Feb 28 11:10:53 2013 +0100

    x86: use linear L1 page table for map_domain_page() page table manipulation
    
    This saves allocation of a Xen heap page for tracking the L1 page table
    pointers.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/domain.c        |    3 --
 xen/arch/x86/domain_page.c   |   45 +++++++++++++----------------------------
 xen/include/asm-x86/domain.h |    4 +--
 3 files changed, 15 insertions(+), 37 deletions(-)
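
The patch relies on Xen's linear (recursive) page-table mapping: because the top-level
table maps itself, every L1 entry of the current address space is visible as one flat
array, so the PTE backing any mapcache virtual address can be computed by indexing,
instead of being tracked in the per-domain l1tab[] pointer array that used to be
allocated from the Xen heap. A minimal sketch of that indexing, using placeholder
constants and names rather than Xen's real definitions of __linear_l1_table and
l1_linear_offset():

    /* Illustrative sketch only -- simplified model of a linear L1 lookup;
     * PAGE_SHIFT and LINEAR_L1_BASE are placeholders, not Xen's values. */
    #include <stdint.h>

    #define PAGE_SHIFT      12
    #define LINEAR_L1_BASE  0xffff808000000000UL  /* hypothetical base of the
                                                     flat L1 array */

    typedef uint64_t l1e_t;

    /* Entry i of the flat array maps virtual page i, so locating the PTE for
     * a VA is a single array index.  MAPCACHE_L1ENT(idx) below is this lookup
     * applied to the address MAPCACHE_VIRT_START + pfn_to_paddr(idx). */
    static inline l1e_t *linear_l1_entry(unsigned long va)
    {
        l1e_t *linear_l1 = (l1e_t *)LINEAR_L1_BASE;
        return &linear_l1[va >> PAGE_SHIFT];
    }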

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 3cdee86..8d30d08 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -585,7 +585,6 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
     free_xenheap_page(d->shared_info);
     if ( paging_initialised )
         paging_final_teardown(d);
-    mapcache_domain_exit(d);
     free_perdomain_mappings(d);
     if ( !is_hvm_domain(d) )
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
@@ -605,8 +604,6 @@ void arch_domain_destroy(struct domain *d)
 
     paging_final_teardown(d);
 
-    mapcache_domain_exit(d);
-
     free_perdomain_mappings(d);
     if ( !is_hvm_domain(d) )
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 989ec29..7421e03 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -53,9 +53,8 @@ void __init mapcache_override_current(struct vcpu *v)
 
 #define mapcache_l2_entry(e) ((e) >> PAGETABLE_ORDER)
 #define MAPCACHE_L2_ENTRIES (mapcache_l2_entry(MAPCACHE_ENTRIES - 1) + 1)
-#define DCACHE_L1ENT(dc, idx) \
-    ((dc)->l1tab[(idx) >> PAGETABLE_ORDER] \
-                [(idx) & ((1 << PAGETABLE_ORDER) - 1)])
+#define MAPCACHE_L1ENT(idx) \
+    __linear_l1_table[l1_linear_offset(MAPCACHE_VIRT_START + pfn_to_paddr(idx))]
 
 void *map_domain_page(unsigned long mfn)
 {
@@ -77,7 +76,7 @@ void *map_domain_page(unsigned long mfn)
 
     dcache = &v->domain->arch.pv_domain.mapcache;
     vcache = &v->arch.pv_vcpu.mapcache;
-    if ( !dcache->l1tab )
+    if ( !dcache->inuse )
         return mfn_to_virt(mfn);
 
     perfc_incr(map_domain_page_count);
@@ -91,7 +90,7 @@ void *map_domain_page(unsigned long mfn)
         ASSERT(idx < dcache->entries);
         hashent->refcnt++;
         ASSERT(hashent->refcnt);
-        ASSERT(l1e_get_pfn(DCACHE_L1ENT(dcache, idx)) == mfn);
+        ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn);
         goto out;
     }
 
@@ -131,9 +130,8 @@ void *map_domain_page(unsigned long mfn)
                 if ( hashent->idx != MAPHASHENT_NOTINUSE && !hashent->refcnt )
                 {
                     idx = hashent->idx;
-                    ASSERT(l1e_get_pfn(DCACHE_L1ENT(dcache, idx)) ==
-                           hashent->mfn);
-                    l1e_write(&DCACHE_L1ENT(dcache, idx), l1e_empty());
+                    ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == hashent->mfn);
+                    l1e_write(&MAPCACHE_L1ENT(idx), l1e_empty());
                     hashent->idx = MAPHASHENT_NOTINUSE;
                     hashent->mfn = ~0UL;
                     break;
@@ -156,8 +154,7 @@ void *map_domain_page(unsigned long mfn)
 
     spin_unlock(&dcache->lock);
 
-    l1e_write(&DCACHE_L1ENT(dcache, idx),
-              l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
 
  out:
     local_irq_restore(flags);
@@ -181,10 +178,10 @@ void unmap_domain_page(const void *ptr)
     ASSERT(v && !is_hvm_vcpu(v));
 
     dcache = &v->domain->arch.pv_domain.mapcache;
-    ASSERT(dcache->l1tab);
+    ASSERT(dcache->inuse);
 
     idx = PFN_DOWN(va - MAPCACHE_VIRT_START);
-    mfn = l1e_get_pfn(DCACHE_L1ENT(dcache, idx));
+    mfn = l1e_get_pfn(MAPCACHE_L1ENT(idx));
     hashent = &v->arch.pv_vcpu.mapcache.hash[MAPHASH_HASHFN(mfn)];
 
     local_irq_save(flags);
@@ -200,9 +197,9 @@ void unmap_domain_page(const void *ptr)
         if ( hashent->idx != MAPHASHENT_NOTINUSE )
         {
             /* /First/, zap the PTE. */
-            ASSERT(l1e_get_pfn(DCACHE_L1ENT(dcache, hashent->idx)) ==
+            ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(hashent->idx)) ==
                    hashent->mfn);
-            l1e_write(&DCACHE_L1ENT(dcache, hashent->idx), l1e_empty());
+            l1e_write(&MAPCACHE_L1ENT(hashent->idx), l1e_empty());
             /* /Second/, mark as garbage. */
             set_bit(hashent->idx, dcache->garbage);
         }
@@ -214,7 +211,7 @@ void unmap_domain_page(const void *ptr)
     else
     {
         /* /First/, zap the PTE. */
-        l1e_write(&DCACHE_L1ENT(dcache, idx), l1e_empty());
+        l1e_write(&MAPCACHE_L1ENT(idx), l1e_empty());
         /* /Second/, mark as garbage. */
         set_bit(idx, dcache->garbage);
     }
@@ -253,10 +250,6 @@ int mapcache_domain_init(struct domain *d)
         return 0;
 #endif
 
-    dcache->l1tab = xzalloc_array(l1_pgentry_t *, MAPCACHE_L2_ENTRIES);
-    if ( !dcache->l1tab )
-        return -ENOMEM;
-
     BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
                  2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) >
                  MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20));
@@ -272,16 +265,6 @@ int mapcache_domain_init(struct domain *d)
                                     NIL(l1_pgentry_t *), NULL);
 }
 
-void mapcache_domain_exit(struct domain *d)
-{
-    struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
-
-    if ( is_hvm_domain(d) )
-        return;
-
-    xfree(dcache->l1tab);
-}
-
 int mapcache_vcpu_init(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -290,7 +273,7 @@ int mapcache_vcpu_init(struct vcpu *v)
     unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
     unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
 
-    if ( is_hvm_vcpu(v) || !dcache->l1tab )
+    if ( is_hvm_vcpu(v) || !dcache->inuse )
         return 0;
 
     if ( ents > dcache->entries )
@@ -298,7 +281,7 @@ int mapcache_vcpu_init(struct vcpu *v)
         /* Populate page tables. */
         int rc = create_perdomain_mapping(d, MAPCACHE_VIRT_START,
                                           d->max_vcpus * MAPCACHE_VCPU_ENTRIES,
-                                          dcache->l1tab, NULL);
+                                          NIL(l1_pgentry_t *), NULL);
 
         /* Populate bit maps. */
         if ( !rc )
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index f91f662..97e09ca 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -53,8 +53,7 @@ struct mapcache_vcpu {
 };
 
 struct mapcache_domain {
-    /* The PTEs that provide the mappings, and a cursor into the array. */
-    l1_pgentry_t **l1tab;
+    /* The number of array entries, and a cursor into the array. */
     unsigned int entries;
     unsigned int cursor;
 
@@ -71,7 +70,6 @@ struct mapcache_domain {
 };
 
 int mapcache_domain_init(struct domain *);
-void mapcache_domain_exit(struct domain *);
 int mapcache_vcpu_init(struct vcpu *);
 void mapcache_override_current(struct vcpu *);
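
For reference, the caller-facing interface is unchanged by this patch; the rework is
internal to map_domain_page()/unmap_domain_page(). A hedged usage sketch (the mfn
variable and the memset are illustrative, not taken from the patch):

    /* Map an arbitrary machine frame into the per-domain mapcache, use the
     * temporary mapping, then release it. */
    void *p = map_domain_page(mfn);  /* installs the PTE via MAPCACHE_L1ENT() */
    memset(p, 0, PAGE_SIZE);         /* hypervisor-visible mapping of the frame */
    unmap_domain_page(p);            /* PTE zapped or cached in the per-vCPU hash */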
 
--
generated by git-patchbot for /home/xen/git/xen.git#master
