
[Xen-devel] [PATCH v2] x86/altp2m: cleanup p2m_altp2m_lazy_copy



The p2m_altp2m_lazy_copy function is responsible for lazily populating an
altp2m view when the guest traps out because no EPT entry is present in the
active view. Currently the function takes several inputs that it doesn't use
and also locks/unlocks gfns when it doesn't need to.

Signed-off-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
v2: return bool; return early if the hp2m entry is invalid; use gprintk and a
    more useful debug message
---
 xen/arch/x86/hvm/hvm.c    |  7 +++--
 xen/arch/x86/mm/p2m.c     | 61 +++++++++++++++++++--------------------
 xen/include/asm-x86/p2m.h |  5 ++--
 3 files changed, 37 insertions(+), 36 deletions(-)
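
An illustrative, condensed sketch of the resulting caller-side flow in
hvm_hap_nested_page_fault() (not part of the patch, just a reading aid).
All identifiers are taken from the hunks below; the retry/exit path after a
successful copy is elided:

    hostp2m = p2m_get_hostp2m(currd);

    /* Single hostp2m lookup; its results are reused for the lazy copy. */
    mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
                              &page_order);

    if ( ap2m_active )
    {
        p2m = p2m_get_altp2m(curr);

        /* Propagate (gfn -> mfn, type, access) into the active altp2m view. */
        if ( p2m_altp2m_lazy_copy(p2m, gfn, mfn, p2mt, p2ma, page_order) )
        {
            /* Entry was lazily copied from the hostp2m -- drop the gfn
             * reference and retry the faulting access (path elided). */
            __put_gfn(hostp2m, gfn);
        }
    }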

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 8adbb61b57..813e69a4c9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1688,6 +1688,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     int sharing_enomem = 0;
     vm_event_request_t *req_ptr = NULL;
     bool_t ap2m_active, sync = 0;
+    unsigned int page_order;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -1754,11 +1755,13 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     hostp2m = p2m_get_hostp2m(currd);
     mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                               P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
-                              NULL);
+                              &page_order);
 
     if ( ap2m_active )
     {
-        if ( p2m_altp2m_lazy_copy(curr, gpa, gla, npfec, &p2m) )
+        p2m = p2m_get_altp2m(curr);
+
+        if ( p2m_altp2m_lazy_copy(p2m, gfn, mfn, p2mt, p2ma, page_order) )
         {
             /* entry was lazily copied from host -- retry */
             __put_gfn(hostp2m, gfn);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b9bbb8f485..dcfa3d357b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2375,57 +2375,54 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
  *     indicate that outer handler should handle fault
  */
 
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
-                            unsigned long gla, struct npfec npfec,
-                            struct p2m_domain **ap2m)
+bool p2m_altp2m_lazy_copy(struct p2m_domain *ap2m, unsigned long gfn_l,
+                          mfn_t hmfn, p2m_type_t hp2mt, p2m_access_t hp2ma,
+                          unsigned int page_order)
 {
-    struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain);
-    p2m_type_t p2mt;
-    p2m_access_t p2ma;
-    unsigned int page_order;
-    gfn_t gfn = _gfn(paddr_to_pfn(gpa));
+    p2m_type_t ap2mt;
+    p2m_access_t ap2ma;
     unsigned long mask;
-    mfn_t mfn;
-    int rv;
+    gfn_t gfn;
+    mfn_t amfn;
+    int rc;
 
-    *ap2m = p2m_get_altp2m(v);
+    /* No point checking the altp2m if entry is not present in hostp2m */
+    if ( mfn_eq(hmfn, INVALID_MFN) )
+        return false;
 
-    mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma,
-                              0, &page_order);
-    __put_gfn(*ap2m, gfn_x(gfn));
+    p2m_lock(ap2m);
 
-    if ( !mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+    amfn = __get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma,
+                                 0, NULL, false);
 
-    mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
-                              P2M_ALLOC, &page_order);
-    __put_gfn(hp2m, gfn_x(gfn));
 
-    if ( mfn_eq(mfn, INVALID_MFN) )
+    /* If entry is in altp2m already caller should handle the fault */
+    if ( !mfn_eq(amfn, INVALID_MFN) )
+    {
+        p2m_unlock(ap2m);
         return 0;
-
-    p2m_lock(*ap2m);
+    }
 
     /*
      * If this is a superpage mapping, round down both frame numbers
      * to the start of the superpage.
      */
     mask = ~((1UL << page_order) - 1);
-    mfn = _mfn(mfn_x(mfn) & mask);
-    gfn = _gfn(gfn_x(gfn) & mask);
+    hmfn = _mfn(mfn_x(hmfn) & mask);
+    gfn = _gfn(gfn_l & mask);
 
-    rv = p2m_set_entry(*ap2m, gfn, mfn, page_order, p2mt, p2ma);
-    p2m_unlock(*ap2m);
+    rc = p2m_set_entry(ap2m, gfn, hmfn, page_order, hp2mt, hp2ma);
+    p2m_unlock(ap2m);
 
-    if ( rv )
+    if ( rc )
     {
-        gdprintk(XENLOG_ERR,
-           "failed to set entry for %#"PRIx64" -> %#"PRIx64" p2m %#"PRIx64"\n",
-           gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m);
-        domain_crash(hp2m->domain);
+        gprintk(XENLOG_ERR,
+        "failed to set entry for %#"PRIx64" -> %#"PRIx64" altp2m %#"PRIx16". 
rc: %"PRIi32"\n",
+        gfn_l, mfn_x(hmfn), vcpu_altp2m(current).p2midx, rc);
+        domain_crash(ap2m->domain);
     }
 
-    return 1;
+    return true;
 }
 
 enum altp2m_reset_type {
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 2801a8ccca..391e7688da 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -867,8 +867,9 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx);
 void p2m_flush_altp2m(struct domain *d);
 
 /* Alternate p2m paging */
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
-    unsigned long gla, struct npfec npfec, struct p2m_domain **ap2m);
+bool p2m_altp2m_lazy_copy(struct p2m_domain *ap2m, unsigned long gfn_l,
+                          mfn_t hmfn, p2m_type_t hp2mt, p2m_access_t hp2ma,
+                          unsigned int page_order);
 
 /* Make a specific alternate p2m valid */
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
-- 
2.20.1

