
[xen staging] x86/mm: {paging, sh}_{cmpxchg, write}_guest_entry() cannot fault



commit 1bc30c076a7f1678166934c080e1bf94b2c189af
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Sep 28 13:57:51 2020 +0200
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Oct 6 12:28:37 2020 +0100

    x86/mm: {paging, sh}_{cmpxchg, write}_guest_entry() cannot fault
    
    As of 2d0557c5cbeb ("x86: Fold page_info lock into type_info") we
    haven't been updating guest page table entries through linear page
    tables anymore. All updates have been using domain mappings since then.
    Drop the use of guest/user access helpers there, and hence also the
    boolean return values of the involved functions.
    
    update_intpte(), on the other hand, retains its boolean return type
    for now, as we may want to bound the CMPXCHG retry loop and instead
    indicate failure to the caller once the retry threshold is exceeded.
    
    With this, {,__}cmpxchg_user() become unused, so they too get dropped.
    (In fact, dropping them was the motivation for making the change.)
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm.c                   |  8 +++----
 xen/arch/x86/mm/shadow/private.h    |  4 ++--
 xen/arch/x86/mm/shadow/pv.c         | 24 +++++++--------------
 xen/arch/x86/pv/mm.h                | 13 ++---------
 xen/arch/x86/pv/ro-page-fault.c     |  7 +++---
 xen/include/asm-x86/paging.h        | 21 +++++++++---------
 xen/include/asm-x86/x86_64/system.h | 43 -------------------------------------
 7 files changed, 30 insertions(+), 90 deletions(-)
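
For readers skimming the diff below: the conversion replaces the faulting
__copy_to_user()/cmpxchg_user() accesses with plain write_atomic()/cmpxchg()
on the domain mapping, so the helpers no longer report success or failure.
Callers now judge a lost CMPXCHG race purely from the value handed back
through *old, as the ro-page-fault.c hunk shows.  A minimal caller-side
sketch (illustrative only; example_update_pte() is a hypothetical name, not
code from this patch):

    static bool example_update_pte(struct vcpu *v, intpte_t *p,
                                   intpte_t old, intpte_t new, mfn_t gmfn)
    {
        intpte_t seen = old;

        /* The helper returns void now -- there is no fault path to report. */
        paging_cmpxchg_guest_entry(v, p, &seen, new, gmfn);

        /* The exchange took effect iff the value observed matches "old". */
        return seen == old;
    }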

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d1cfc8fb4a..8c8f054186 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4033,8 +4033,8 @@ long do_mmu_update(
 
                 case PGT_writable_page:
                     perfc_incr(writable_mmu_updates);
-                    if ( paging_write_guest_entry(v, va, req.val, mfn) )
-                        rc = 0;
+                    paging_write_guest_entry(v, va, req.val, mfn);
+                    rc = 0;
                     break;
                 }
                 page_unlock(page);
@@ -4044,9 +4044,9 @@ long do_mmu_update(
             else if ( get_page_type(page, PGT_writable_page) )
             {
                 perfc_incr(writable_mmu_updates);
-                if ( paging_write_guest_entry(v, va, req.val, mfn) )
-                    rc = 0;
+                paging_write_guest_entry(v, va, req.val, mfn);
                 put_page_type(page);
+                rc = 0;
             }
 
             put_page(page);
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 0a8927f49e..fd72f4afb0 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -396,9 +396,9 @@ int shadow_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
                            unsigned int level);
 
 /* Functions that atomically write PV guest PT entries */
-bool sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new,
+void sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new,
                           mfn_t gmfn);
-bool sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
+void sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
                             intpte_t new, mfn_t gmfn);
 
 /* Update all the things that are derived from the guest's CR0/CR3/CR4.
diff --git a/xen/arch/x86/mm/shadow/pv.c b/xen/arch/x86/mm/shadow/pv.c
index 25dca1eb25..ac779afce1 100644
--- a/xen/arch/x86/mm/shadow/pv.c
+++ b/xen/arch/x86/mm/shadow/pv.c
@@ -26,43 +26,35 @@
 
 /*
  * Write a new value into the guest pagetable, and update the shadows
- * appropriately.  Returns false if we page-faulted, true for success.
+ * appropriately.
  */
-bool
+void
 sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
 {
-    unsigned int failed;
-
     paging_lock(v->domain);
-    failed = __copy_to_user(p, &new, sizeof(new));
-    if ( failed != sizeof(new) )
-        sh_validate_guest_entry(v, gmfn, p, sizeof(new));
+    write_atomic(p, new);
+    sh_validate_guest_entry(v, gmfn, p, sizeof(new));
     paging_unlock(v->domain);
-
-    return !failed;
 }
 
 /*
  * Cmpxchg a new value into the guest pagetable, and update the shadows
- * appropriately. Returns false if we page-faulted, true if not.
+ * appropriately.
  * N.B. caller should check the value of "old" to see if the cmpxchg itself
  * was successful.
  */
-bool
+void
 sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
                        intpte_t new, mfn_t gmfn)
 {
-    bool failed;
-    intpte_t t = *old;
+    intpte_t t;
 
     paging_lock(v->domain);
-    failed = cmpxchg_user(p, t, new);
+    t = cmpxchg(p, *old, new);
     if ( t == *old )
         sh_validate_guest_entry(v, gmfn, p, sizeof(new));
     *old = t;
     paging_unlock(v->domain);
-
-    return !failed;
 }
 
 /*
diff --git a/xen/arch/x86/pv/mm.h b/xen/arch/x86/pv/mm.h
index a1bd473b29..f537e58a8d 100644
--- a/xen/arch/x86/pv/mm.h
+++ b/xen/arch/x86/pv/mm.h
@@ -43,9 +43,7 @@ static inline bool update_intpte(intpte_t *p, intpte_t old, intpte_t new,
 
 #ifndef PTE_UPDATE_WITH_CMPXCHG
     if ( !preserve_ad )
-    {
-        rv = paging_write_guest_entry(v, p, new, mfn);
-    }
+        paging_write_guest_entry(v, p, new, mfn);
     else
 #endif
     {
@@ -58,14 +56,7 @@ static inline bool update_intpte(intpte_t *p, intpte_t old, intpte_t new,
             if ( preserve_ad )
                 _new |= old & (_PAGE_ACCESSED | _PAGE_DIRTY);
 
-            rv = paging_cmpxchg_guest_entry(v, p, &t, _new, mfn);
-            if ( unlikely(rv == 0) )
-            {
-                gdprintk(XENLOG_WARNING,
-                         "Failed to update %" PRIpte " -> %" PRIpte
-                         ": saw %" PRIpte "\n", old, _new, t);
-                break;
-            }
+            paging_cmpxchg_guest_entry(v, p, &t, _new, mfn);
 
             if ( t == old )
                 break;
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 556ee7ca11..8468f9e9bb 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -168,10 +168,9 @@ static int ptwr_emulated_update(unsigned long addr, intpte_t *p_old,
     if ( p_old )
     {
         ol1e = l1e_from_intpte(old);
-        if ( !paging_cmpxchg_guest_entry(v, &l1e_get_intpte(*pl1e),
-                                         &old, l1e_get_intpte(nl1e), mfn) )
-            ret = X86EMUL_UNHANDLEABLE;
-        else if ( l1e_get_intpte(ol1e) == old )
+        paging_cmpxchg_guest_entry(v, &l1e_get_intpte(*pl1e), &old,
+                                   l1e_get_intpte(nl1e), mfn);
+        if ( l1e_get_intpte(ol1e) == old )
             ret = X86EMUL_OKAY;
         else
         {
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 87ce135b8c..4a26f30c81 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -100,9 +100,9 @@ struct shadow_paging_mode {
 #ifdef CONFIG_SHADOW_PAGING
     void          (*detach_old_tables     )(struct vcpu *v);
 #ifdef CONFIG_PV
-    bool          (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
+    void          (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
                                             intpte_t new, mfn_t gmfn);
-    bool          (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
+    void          (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
                                             intpte_t *old, intpte_t new,
                                             mfn_t gmfn);
 #endif
@@ -333,15 +333,15 @@ static inline void paging_update_paging_modes(struct vcpu *v)
  * paging-assistance state appropriately.  Returns false if we page-faulted,
  * true for success.
  */
-static inline bool paging_write_guest_entry(
+static inline void paging_write_guest_entry(
     struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
 {
 #ifdef CONFIG_SHADOW_PAGING
     if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
-        return paging_get_hostmode(v)->shadow.write_guest_entry(v, p, new,
-                                                                gmfn);
+        paging_get_hostmode(v)->shadow.write_guest_entry(v, p, new, gmfn);
+    else
 #endif
-    return !__copy_to_user(p, &new, sizeof(new));
+        write_atomic(p, new);
 }
 
 
@@ -351,15 +351,16 @@ static inline bool paging_write_guest_entry(
  * true if not.  N.B. caller should check the value of "old" to see if the
  * cmpxchg itself was successful.
  */
-static inline bool paging_cmpxchg_guest_entry(
+static inline void paging_cmpxchg_guest_entry(
     struct vcpu *v, intpte_t *p, intpte_t *old, intpte_t new, mfn_t gmfn)
 {
 #ifdef CONFIG_SHADOW_PAGING
     if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
-        return paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old,
-                                                                  new, gmfn);
+        paging_get_hostmode(v)->shadow.cmpxchg_guest_entry(v, p, old,
+                                                           new, gmfn);
+    else
 #endif
-    return !cmpxchg_user(p, *old, new);
+        *old = cmpxchg(p, *old, new);
 }
 
 #endif /* CONFIG_PV */
diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
index f471859c19..e94371cf20 100644
--- a/xen/include/asm-x86/x86_64/system.h
+++ b/xen/include/asm-x86/x86_64/system.h
@@ -59,47 +59,4 @@ static always_inline __uint128_t cmpxchg16b_local_(
     __cmpxchg16b(_p, (void *)(o), (void *)(n));            \
 })
 
-/*
- * This function causes value _o to be changed to _n at location _p.
- * If this access causes a fault then we return 1, otherwise we return 0.
- * If no fault occurs then _o is updated to the value we saw at _p. If this
- * is the same as the initial value of _o then _n is written to location _p.
- */
-#define __cmpxchg_user(_p, _o, _n, _oppre, _regtype)                    \
-    stac();                                                             \
-    asm volatile (                                                      \
-        "1: lock cmpxchg %"_oppre"[new], %[ptr]\n"                      \
-        "2:\n"                                                          \
-        ".section .fixup,\"ax\"\n"                                      \
-        "3:     movl $1, %[rc]\n"                                       \
-        "       jmp 2b\n"                                               \
-        ".previous\n"                                                   \
-        _ASM_EXTABLE(1b, 3b)                                            \
-        : "+a" (_o), [rc] "=r" (_rc),                                   \
-          [ptr] "+m" (*(volatile typeof(*(_p)) *)(_p))                  \
-        : [new] _regtype (_n), "[rc]" (0)                               \
-        : "memory");                                                    \
-    clac()
-
-#define cmpxchg_user(_p, _o, _n)                                        \
-({                                                                      \
-    int _rc;                                                            \
-    switch ( sizeof(*(_p)) )                                            \
-    {                                                                   \
-    case 1:                                                             \
-        __cmpxchg_user(_p, _o, _n, "b", "q");                           \
-        break;                                                          \
-    case 2:                                                             \
-        __cmpxchg_user(_p, _o, _n, "w", "r");                           \
-        break;                                                          \
-    case 4:                                                             \
-        __cmpxchg_user(_p, _o, _n, "k", "r");                           \
-        break;                                                          \
-    case 8:                                                             \
-        __cmpxchg_user(_p, _o, _n, "q", "r");                           \
-        break;                                                          \
-    }                                                                   \
-    _rc;                                                                \
-})
-
 #endif /* __X86_64_SYSTEM_H__ */
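
The commit message notes that update_intpte() keeps its boolean return so
that the CMPXCHG retry loop could later be bounded.  Purely as a sketch of
that idea -- the retry threshold, the function name and its exact shape are
assumptions, not part of this change:

    /* Hypothetical: bound the preserve-A/D retry loop in update_intpte(). */
    #define INTPTE_UPDATE_RETRIES 8   /* assumed threshold */

    static inline bool update_intpte_bounded(intpte_t *p, intpte_t old,
                                             intpte_t new, mfn_t mfn,
                                             struct vcpu *v, bool preserve_ad)
    {
        unsigned int i;

        for ( i = 0; i < INTPTE_UPDATE_RETRIES; i++ )
        {
            intpte_t t = old, _new = new;

            if ( preserve_ad )
                _new |= old & (_PAGE_ACCESSED | _PAGE_DIRTY);

            paging_cmpxchg_guest_entry(v, p, &t, _new, mfn);

            if ( t == old )
                return true;   /* exchange succeeded */

            old = t;           /* lost a race: retry against the value seen */
        }

        return false;          /* threshold exceeded -- report failure */
    }
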
--
generated by git-patchbot for /home/xen/git/xen.git#staging



 

