
[Xen-changelog] [xen master] x86/pagewalk: Fix determination of Protection Key access rights



commit 8f2e3d8913bba06651d9021b6f925bdcd1060f4a
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue May 16 15:47:33 2017 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri May 19 15:41:45 2017 +0100

    x86/pagewalk: Fix determination of Protection Key access rights
    
     * When fabricating gl1e's from superpages, propagate the protection key as
       well, so the protection key logic sees the real key as opposed to 0.
    
     * Experimentally, the protection key checks are performed ahead of the
       other access rights.  In particular, accesses which fail both
       protection key and regular permission checks yield PFEC_prot_key in
       the resulting pagefault.
    
     * Protection keys apply to all data accesses to user-mode addresses,
       including accesses from supervisor code.  PKRU WD applies to any data
       write, not just to mappings which are writable.  However, a supervisor
       access with CR0.WP clear bypasses any protection from protection keys.
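    
    For illustration, a minimal sketch of the check described above.  This is
    not the Xen implementation; the helper names (pkru_ad, pkru_wd, pk_fault)
    and the boolean parameters are assumptions made for the example only:
    
        /*
         * Illustrative only: helper names and inputs are assumptions, not
         * Xen's actual interfaces.  Each protection key owns two PKRU bits:
         * AD (access disable) at bit 2*pkey, WD (write disable) at
         * bit 2*pkey + 1.
         */
        #include <stdbool.h>
        #include <stdint.h>
        
        static bool pkru_ad(uint32_t pkru, unsigned int pkey)
        {
            return (pkru >> (pkey * 2)) & 1;
        }
        
        static bool pkru_wd(uint32_t pkru, unsigned int pkey)
        {
            return (pkru >> (pkey * 2 + 1)) & 1;
        }
        
        /*
         * True if the access should fault with PFEC_prot_key.  Protection
         * keys only cover data accesses to user mappings; WD covers any
         * data write, but a supervisor write with CR0.WP clear is exempt.
         * In the patch below, this check runs ahead of the NX and other
         * access-rights checks.
         */
        static bool pk_fault(bool user_mapping, bool insn_fetch, bool write,
                             bool user_mode_access, bool cr0_wp,
                             uint32_t pkru, unsigned int pkey)
        {
            if ( !user_mapping || insn_fetch )
                return false;
        
            if ( pkru_ad(pkru, pkey) )
                return true;
        
            return write && pkru_wd(pkru, pkey) &&
                   (user_mode_access || cr0_wp);
        }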
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
    Release-acked-by: Julien Grall <julien.grall@xxxxxxx>
---
 xen/arch/x86/mm/guest_walk.c | 63 ++++++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 31 deletions(-)

diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 32d818e..5c6a85b 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -197,12 +197,12 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         int flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
                      _PAGE_ACCESSED|_PAGE_DIRTY);
         /*
-         * Import cache-control bits. Note that _PAGE_PAT is actually
-         * _PAGE_PSE, and it is always set. We will clear it in case
-         * _PAGE_PSE_PAT (bit 12, i.e. first bit of gfn) is clear.
+         * Import protection key and cache-control bits. Note that _PAGE_PAT
+         * is actually _PAGE_PSE, and it is always set. We will clear it in
+         * case _PAGE_PSE_PAT (bit 12, i.e. first bit of gfn) is clear.
          */
         flags |= (guest_l3e_get_flags(gw->l3e)
-                  & (_PAGE_PAT|_PAGE_PWT|_PAGE_PCD));
+                  & (_PAGE_PKEY_BITS|_PAGE_PAT|_PAGE_PWT|_PAGE_PCD));
         if ( !(gfn_x(start) & 1) )
             /* _PAGE_PSE_PAT not set: remove _PAGE_PAT from flags. */
             flags &= ~_PAGE_PAT;
@@ -302,12 +302,12 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         int flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
                      _PAGE_ACCESSED|_PAGE_DIRTY);
         /*
-         * Import cache-control bits. Note that _PAGE_PAT is actually
-         * _PAGE_PSE, and it is always set. We will clear it in case
-         * _PAGE_PSE_PAT (bit 12, i.e. first bit of gfn) is clear.
+         * Import protection key and cache-control bits. Note that _PAGE_PAT
+         * is actually _PAGE_PSE, and it is always set. We will clear it in
+         * case _PAGE_PSE_PAT (bit 12, i.e. first bit of gfn) is clear.
          */
         flags |= (guest_l2e_get_flags(gw->l2e)
-                  & (_PAGE_PAT|_PAGE_PWT|_PAGE_PCD));
+                  & (_PAGE_PKEY_BITS|_PAGE_PAT|_PAGE_PWT|_PAGE_PCD));
         if ( !(gfn_x(start) & 1) )
             /* _PAGE_PSE_PAT not set: remove _PAGE_PAT from flags. */
             flags &= ~_PAGE_PAT;
@@ -365,6 +365,30 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
      */
     ar = (ar_and & AR_ACCUM_AND) | (ar_or & AR_ACCUM_OR);
 
+#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
+    /*
+     * If all access checks are thus far ok, check Protection Key for 64bit
+     * data accesses to user mappings.
+     *
+     * N.B. In the case that the walk ended with a superpage, the fabricated
+     * gw->l1e contains the appropriate leaf pkey.
+     */
+    if ( (ar & _PAGE_USER) && !(walk & PFEC_insn_fetch) &&
+         guest_pku_enabled(v) )
+    {
+        unsigned int pkey = guest_l1e_get_pkey(gw->l1e);
+        unsigned int pkru = read_pkru();
+
+        if ( read_pkru_ad(pkru, pkey) ||
+             ((walk & PFEC_write_access) && read_pkru_wd(pkru, pkey) &&
+              ((walk & PFEC_user_mode) || guest_wp_enabled(v))) )
+        {
+            gw->pfec |= PFEC_prot_key;
+            goto out;
+        }
+    }
+#endif
+
     if ( (walk & PFEC_insn_fetch) && (ar & _PAGE_NX_BIT) )
         /* Requested an instruction fetch and found NX? Fail. */
         goto out;
@@ -400,29 +424,6 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
             goto out;
     }
 
-#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
-    /*
-     * If all access checks are thusfar ok, check Protection Key for 64bit
-     * user data accesses.
-     *
-     * N.B. In the case that the walk ended with a superpage, the fabricated
-     * gw->l1e contains the appropriate leaf pkey.
-     */
-    if ( (walk & PFEC_user_mode) && !(walk & PFEC_insn_fetch) &&
-         guest_pku_enabled(v) )
-    {
-        unsigned int pkey = guest_l1e_get_pkey(gw->l1e);
-        unsigned int pkru = read_pkru();
-
-        if ( read_pkru_ad(pkru, pkey) ||
-             ((ar & PFEC_write_access) && read_pkru_wd(pkru, pkey)) )
-        {
-            gw->pfec |= PFEC_prot_key;
-            goto out;
-        }
-    }
-#endif
-
     walk_ok = true;
 
     /*
--
generated by git-patchbot for /home/xen/git/xen.git#master
