
[Xen-devel] [PATCH 1 of 5] Nested p2m: implement "flush" as a first-class action



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1308929084 -3600
# Node ID 44c7b977302663487922a5d822d1d4032badfebc
# Parent  b240183197720129a8d83847bc5592d6dff3d530
Nested p2m: implement "flush" as a first-class action
rather than using the teardown and init functions.
This makes the locking clearer and avoids an expensive scan of all
pfns that's only needed for non-nested p2ms.  It also moves the
TLB flush into the proper place in the flush logic, avoiding a
possible race against other CPUs.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
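
As a quick orientation for the diff below, here is a minimal, self-contained
sketch of the ordering that the new p2m_flush_locked() enforces: mark the
table invalid and clear its top level, IPI-flush any CPUs that may still hold
translations before the trie pages are freed, and hand back every page except
the top level.  All types and helpers in the sketch (mock_p2m, mock_page,
mock_free_page, ...) are stand-ins for illustration only, not the Xen
structures used in the patch.

/* Illustrative sketch only: mock types, not the Xen implementation.
 * It mirrors the shape of the new p2m_flush_locked(): invalidate the
 * table, zap the top level, flush remote TLBs *before* freeing the
 * remaining trie pages, and keep only the (now empty) top-level page. */
#include <stddef.h>

struct mock_page { struct mock_page *next; };

struct mock_p2m {
    unsigned long cr3;        /* ~0UL stands in for CR3_EADDR ("not in use") */
    struct mock_page *top;    /* top level of the trie, kept allocated */
    struct mock_page *pages;  /* list of all pages backing the trie */
};

static void mock_clear_page(struct mock_page *pg) { (void)pg; /* zero the page */ }
static void mock_flush_remote_tlbs(struct mock_p2m *p2m) { (void)p2m; /* IPI other CPUs */ }
static void mock_free_page(struct mock_page *pg) { (void)pg; /* return to paging pool */ }

/* Reset the table in place rather than tearing it down and re-initialising. */
static void mock_p2m_flush_locked(struct mock_p2m *p2m)
{
    struct mock_page *pg, *next;

    p2m->cr3 = ~0UL;              /* no longer valid for any guest address space */
    mock_clear_page(p2m->top);    /* zap the top level of the trie */
    mock_flush_remote_tlbs(p2m);  /* nobody may still be walking the old entries */

    for ( pg = p2m->pages; pg != NULL; pg = next )
    {
        next = pg->next;
        if ( pg != p2m->top )
            mock_free_page(pg);   /* free everything except the top level */
    }
    p2m->top->next = NULL;
    p2m->pages = p2m->top;        /* list now holds only the empty top level */
}

int main(void)
{
    struct mock_page pgs[4];
    struct mock_p2m p2m;
    unsigned int i;

    /* Chain four pages; treat pgs[0] as the top level of the trie. */
    for ( i = 0; i < 3; i++ )
        pgs[i].next = &pgs[i + 1];
    pgs[3].next = NULL;

    p2m.cr3 = 0x1000;
    p2m.top = &pgs[0];
    p2m.pages = &pgs[0];

    mock_p2m_flush_locked(&p2m);  /* leaves only the empty top-level page */
    return 0;
}

The point of the ordering is that the remote TLB flush happens after the
table has been made unreachable but before any of its pages can be reused,
which is the race against other CPUs that the changeset description refers to.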

diff -r b24018319772 -r 44c7b9773026 xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c      Thu Jun 23 18:34:55 2011 +0100
+++ b/xen/arch/x86/hvm/nestedhvm.c      Fri Jun 24 16:24:44 2011 +0100
@@ -119,12 +119,6 @@ nestedhvm_vmcx_flushtlb(struct p2m_domai
     cpus_clear(p2m->p2m_dirty_cpumask);
 }
 
-void
-nestedhvm_vmcx_flushtlbdomain(struct domain *d)
-{
-    on_selected_cpus(d->domain_dirty_cpumask, nestedhvm_flushtlb_ipi, d, 1);
-}
-
 bool_t
 nestedhvm_is_n2(struct vcpu *v)
 {
diff -r b24018319772 -r 44c7b9773026 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Jun 23 18:34:55 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri Jun 24 16:24:44 2011 +0100
@@ -1050,20 +1050,41 @@ p2m_getlru_nestedp2m(struct domain *d, s
     return lrup2m;
 }
 
-static int 
+/* Reset this p2m table to be empty */
+static void
 p2m_flush_locked(struct p2m_domain *p2m)
 {
-    ASSERT(p2m);
-    if (p2m->cr3 == CR3_EADDR)
-        /* Microoptimisation: p2m is already empty.
-         * => about 0.3% speedup of overall system performance.
-         */
-        return 0;
+    struct page_info *top, *pg;
+    struct domain *d = p2m->domain;
+    void *p;
 
-    p2m_teardown(p2m);
-    p2m_initialise(p2m->domain, p2m);
-    p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
-    return p2m_alloc_table(p2m);
+    p2m_lock(p2m);
+
+    /* "Host" p2m tables can have shared entries &c that need a bit more 
+     * care when discarding them */
+    ASSERT(p2m_is_nestedp2m(p2m));
+    ASSERT(page_list_empty(&p2m->pod.super));
+    ASSERT(page_list_empty(&p2m->pod.single));
+
+    /* This is no longer a valid nested p2m for any address space */
+    p2m->cr3 = CR3_EADDR;
+    
+    /* Zap the top level of the trie */
+    top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
+    p = __map_domain_page(top);
+    clear_page(p);
+    unmap_domain_page(p);
+
+    /* Make sure nobody else is using this p2m table */
+    nestedhvm_vmcx_flushtlb(p2m);
+
+    /* Free the rest of the trie pages back to the paging pool */
+    while ( (pg = page_list_remove_head(&p2m->pages)) )
+        if ( pg != top ) 
+            d->arch.paging.free_page(d, pg);
+    page_list_add(top, &p2m->pages);
+
+    p2m_unlock(p2m);
 }
 
 void
@@ -1074,9 +1095,8 @@ p2m_flush(struct vcpu *v, struct p2m_dom
     ASSERT(v->domain == d);
     vcpu_nestedhvm(v).nv_p2m = NULL;
     nestedp2m_lock(d);
-    BUG_ON(p2m_flush_locked(p2m) != 0);
+    p2m_flush_locked(p2m);
     hvm_asid_flush_vcpu(v);
-    nestedhvm_vmcx_flushtlb(p2m);
     nestedp2m_unlock(d);
 }
 
@@ -1086,12 +1106,8 @@ p2m_flush_nestedp2m(struct domain *d)
     int i;
 
     nestedp2m_lock(d);
-    for (i = 0; i < MAX_NESTEDP2M; i++) {
-        struct p2m_domain *p2m = d->arch.nested_p2m[i];
-        BUG_ON(p2m_flush_locked(p2m) != 0);
-        cpus_clear(p2m->p2m_dirty_cpumask);
-    }
-    nestedhvm_vmcx_flushtlbdomain(d);
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+        p2m_flush_locked(d->arch.nested_p2m[i]);
     nestedp2m_unlock(d);
 }
 
@@ -1104,7 +1120,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
     volatile struct nestedvcpu *nv = &vcpu_nestedhvm(v);
     struct domain *d;
     struct p2m_domain *p2m;
-    int i, rv;
+    int i;
 
     if (cr3 == 0 || cr3 == CR3_EADDR)
         cr3 = v->arch.hvm_vcpu.guest_cr[3];
@@ -1136,9 +1152,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64
      */
     for (i = 0; i < MAX_NESTEDP2M; i++) {
         p2m = p2m_getlru_nestedp2m(d, NULL);
-        rv = p2m_flush_locked(p2m);
-        if (rv == 0)
-            break;
+        p2m_flush_locked(p2m);
     }
     nv->nv_p2m = p2m;
     p2m->cr3 = cr3;
diff -r b24018319772 -r 44c7b9773026 xen/include/asm-x86/hvm/nestedhvm.h
--- a/xen/include/asm-x86/hvm/nestedhvm.h       Thu Jun 23 18:34:55 2011 +0100
+++ b/xen/include/asm-x86/hvm/nestedhvm.h       Fri Jun 24 16:24:44 2011 +0100
@@ -61,7 +61,6 @@ unsigned long *nestedhvm_vcpu_iomap_get(
     (!!vcpu_nestedhvm((v)).nv_vmswitch_in_progress)
 
 void nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m);
-void nestedhvm_vmcx_flushtlbdomain(struct domain *d);
 
 bool_t nestedhvm_is_n2(struct vcpu *v);
 
