[Xen-changelog] [xen stable-4.12] x86/altp2m: cleanup p2m_altp2m_lazy_copy



commit 125b038ac615948c55b3a07bd890ab14a1eac15d
Author:     Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
AuthorDate: Fri Jul 19 16:11:20 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Jul 19 16:11:20 2019 +0200

    x86/altp2m: cleanup p2m_altp2m_lazy_copy
    
    The p2m_altp2m_lazy_copy is responsible for lazily populating an
    altp2m view when the guest traps out due to no EPT entry being present
    in the active view.  Currently, in addition to taking a number of
    unused arguments, the whole calling convention has a number of
    redundant p2m lookups: the function reads the hostp2m, even though the
    caller has just read the same hostp2m entry; and then the caller
    re-reads the altp2m entry that the function has just read (and
    possibly set).
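    
    As a sketch (condensed from the removed code below, not the literal
    call sites), the old flow cost four p2m lookups per fault:
    
        /* Old convention -- simplified: */
        mfn = get_gfn_type_access(hostp2m, gfn, ...);          /* lookup 1 */
        if ( p2m_altp2m_lazy_copy(curr, gpa, gla, npfec, &p2m) )
            /* ... which re-read the altp2m and the hostp2m (2, 3) ... */
            goto retry;
        mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL); /* 4 */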
    
    Rework this function to make it a bit more rational.  Specifically:
    
    - Pass the current hostp2m entry values we have just read for it to
      use to populate the altp2m entry if it finds the entry empty.
    
    - If the altp2m entry is not empty, pass out the values we've read so
      the caller doesn't need to re-walk the tables.
    
    - Either way, return with the gfn 'locked', to make clean-up handling
      more consistent.
    
    Rename the function to better reflect this functionality.
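    
    In caller terms the new convention then looks roughly like this
    (simplified from the hvm.c hunk below):
    
        /* New convention -- simplified: */
        mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                                  flags, &page_order);       /* one lookup */
        if ( ap2m_active )
        {
            p2m = p2m_get_altp2m(curr);
    
            /* Reads the altp2m entry, or propagates the host values
             * into it; either way the gfn comes back locked. */
            if ( p2m_altp2m_get_or_propagate(p2m, gfn, &mfn, &p2mt,
                                             &p2ma, page_order) )
                goto out_put_gfn;     /* propagated -- retry the fault */
        }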
    
    While we're here, change bool_t to bool, and return true/false rather
    than 1/0.
    
    It's a bit grating to do both the p2m_lock() and the get_gfn(),
    knowing that they boil down to the same thing at the moment; but we
    have to maintain the fiction until such time as we decide to get rid
    of it entirely.
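    
    (For reference, the "fiction" is that the per-gfn lock is currently
    just an alias for the full p2m lock; at the time of writing,
    xen/arch/x86/mm/mm-locks.h has:
    
        /* Per-gfn locks currently devolve to the whole p2m lock: */
        #define gfn_lock(p,g,o)      p2m_lock(p)
        #define gfn_unlock(p,g,o)    p2m_unlock(p)
    
    so the p2m_lock() here and the gfn lock taken by get_gfn()
    serialise on the same lock.)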
    
    Signed-off-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
    Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    Tested-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
    
    x86/altp2m: Fix style errors introduced with c/s 9abcac7ff
    
    Drop introduced trailing whitespace, excessively long lines, mis-indentation,
    superfluous use of PRI macros for int-or-smaller types, and incorrect PRI
    macros for gfns and mfns.
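    
    For instance, gfn and mfn values have dedicated format macros, so a
    conforming message looks like (a sketch, not a line from this patch):
    
        printk(XENLOG_ERR "gfn %"PRI_gfn" -> mfn %"PRI_mfn"\n",
               gfn_l, mfn_x(mfn));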
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
    master commit: 9abcac7ff14506b934e55d1cfd86575f182b77b7
    master date: 2019-05-28 14:10:36 +0100
    master commit: df4e4cafd28d63be64db06493e310ac0217d2c5b
    master date: 2019-05-29 14:39:28 +0100
---
 xen/arch/x86/hvm/hvm.c    | 20 +++++++----
 xen/arch/x86/mm/p2m.c     | 91 ++++++++++++++++++++++++++---------------------
 xen/include/asm-x86/p2m.h |  5 +--
 3 files changed, 66 insertions(+), 50 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 209ed97658..2089a77270 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1688,6 +1688,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     int sharing_enomem = 0;
     vm_event_request_t *req_ptr = NULL;
     bool_t ap2m_active, sync = 0;
+    unsigned int page_order;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -1754,19 +1755,24 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     hostp2m = p2m_get_hostp2m(currd);
     mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
-                              NULL);
+                              &page_order);
 
     if ( ap2m_active )
     {
-        if ( p2m_altp2m_lazy_copy(curr, gpa, gla, npfec, &p2m) )
+        p2m = p2m_get_altp2m(curr);
+
+        /*
+         * Get the altp2m entry if present; or if not, propagate from
+         * the host p2m.  NB that this returns with gfn locked in the
+         * altp2m.
+         */
+        if ( p2m_altp2m_get_or_propagate(p2m, gfn, &mfn, &p2mt,
+                                         &p2ma, page_order) )
         {
-            /* entry was lazily copied from host -- retry */
-            __put_gfn(hostp2m, gfn);
+            /* Entry was copied from host -- retry fault */
             rc = 1;
-            goto out;
+            goto out_put_gfn;
         }
-
-        mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
     }
     else
         p2m = hostp2m;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b9bbb8f485..4be7790245 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2367,65 +2367,74 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
 }
 
 /*
- * If the fault is for a not present entry:
- *     if the entry in the host p2m has a valid mfn, copy it and retry
- *     else indicate that outer handler should handle fault
+ * Read info about the gfn in an altp2m, locking the gfn.
  *
- * If the fault is for a present entry:
- *     indicate that outer handler should handle fault
+ * If the entry is valid, pass the results back to the caller.
+ *
+ * If the entry is invalid, and the host's entry is also invalid,
+ * return to the caller without any changes.
+ *
+ * If the entry is invalid, and the host entry is valid, propagate
+ * the host's entry to the altp2m (retaining page order), and indicate
+ * that the caller should re-try the faulting instruction.
  */
-
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
-                            unsigned long gla, struct npfec npfec,
-                            struct p2m_domain **ap2m)
+bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
+                                 mfn_t *mfn, p2m_type_t *p2mt,
+                                 p2m_access_t *p2ma, unsigned int page_order)
 {
-    struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain);
-    p2m_type_t p2mt;
-    p2m_access_t p2ma;
-    unsigned int page_order;
-    gfn_t gfn = _gfn(paddr_to_pfn(gpa));
+    p2m_type_t ap2mt;
+    p2m_access_t ap2ma;
     unsigned long mask;
-    mfn_t mfn;
-    int rv;
-
-    *ap2m = p2m_get_altp2m(v);
-
-    mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma,
-                              0, &page_order);
-    __put_gfn(*ap2m, gfn_x(gfn));
+    gfn_t gfn;
+    mfn_t amfn;
+    int rc;
 
-    if ( !mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+    /*
+     * NB we must get the full lock on the altp2m here, in addition to
+     * the lock on the individual gfn, since we may change a range of
+     * gfns below.
+     */
+    p2m_lock(ap2m);
 
-    mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
-                              P2M_ALLOC, &page_order);
-    __put_gfn(hp2m, gfn_x(gfn));
+    amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, NULL);
 
-    if ( mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+    if ( !mfn_eq(amfn, INVALID_MFN) )
+    {
+        p2m_unlock(ap2m);
+        *mfn  = amfn;
+        *p2mt = ap2mt;
+        *p2ma = ap2ma;
+        return false;
+    }
 
-    p2m_lock(*ap2m);
+    /* Host entry is also invalid; don't bother setting the altp2m entry. */
+    if ( mfn_eq(*mfn, INVALID_MFN) )
+    {
+        p2m_unlock(ap2m);
+        return false;
+    }
 
     /*
      * If this is a superpage mapping, round down both frame numbers
-     * to the start of the superpage.
+     * to the start of the superpage.  NB that we repurpose `amfn`
+     * here.
      */
     mask = ~((1UL << page_order) - 1);
-    mfn = _mfn(mfn_x(mfn) & mask);
-    gfn = _gfn(gfn_x(gfn) & mask);
+    amfn = _mfn(mfn_x(*mfn) & mask);
+    gfn = _gfn(gfn_l & mask);
 
-    rv = p2m_set_entry(*ap2m, gfn, mfn, page_order, p2mt, p2ma);
-    p2m_unlock(*ap2m);
+    rc = p2m_set_entry(ap2m, gfn, amfn, page_order, *p2mt, *p2ma);
+    p2m_unlock(ap2m);
 
-    if ( rv )
+    if ( rc )
     {
-        gdprintk(XENLOG_ERR,
-           "failed to set entry for %#"PRIx64" -> %#"PRIx64" p2m %#"PRIx64"\n",
-           gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m);
-        domain_crash(hp2m->domain);
+        gprintk(XENLOG_ERR,
+                "failed to set entry for %"PRI_gfn" -> %"PRI_mfn" altp2m %u, 
rc %d\n",
+                gfn_l, mfn_x(amfn), vcpu_altp2m(current).p2midx, rc);
+        domain_crash(ap2m->domain);
     }
 
-    return 1;
+    return true;
 }
 
 enum altp2m_reset_type {
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 2801a8ccca..c551f4446c 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -867,8 +867,9 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx);
 void p2m_flush_altp2m(struct domain *d);
 
 /* Alternate p2m paging */
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
-    unsigned long gla, struct npfec npfec, struct p2m_domain **ap2m);
+bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
+                                 mfn_t *mfn, p2m_type_t *p2mt,
+                                 p2m_access_t *p2ma, unsigned int page_order);
 
 /* Make a specific alternate p2m valid */
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.12

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog