
[Xen-devel] [V3 PATCH 7/9] x86/hvm: pkeys, add pkeys support for guest_walk_tables



This patch adds protection-key (pkeys) support to guest_walk_tables(): when a
leaf entry's protection key denies the access according to the guest's PKRU
register, the walk now reports the failure via _PAGE_PKEY_BITS. fpu_xsave() is
made non-static and declared in i387.h so the walker can refresh the vcpu's
xsave area before reading PKRU from it.

Signed-off-by: Huaitong Han <huaitong.han@xxxxxxxxx>
---
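Note for reviewers (not part of the commit message): the walker reads the
guest's PKRU out of the vcpu xsave area and consults the two per-key bits,
Access-Disable and Write-Disable. Below is a minimal sketch of the
architectural PKRU layout assumed by the read_pkru_ad()/read_pkru_wd()
helpers used in this patch; the sketch_* names are illustrative only and
are not introduced anywhere in the series:

    /* PKRU holds 2 bits per protection key: AD at bit 2*key, WD at bit 2*key+1. */
    static inline unsigned int sketch_pkru_ad(unsigned int pkru, unsigned int pkey)
    {
        return (pkru >> (pkey * 2)) & 1;      /* 1 => all data accesses disabled */
    }

    static inline unsigned int sketch_pkru_wd(unsigned int pkru, unsigned int pkey)
    {
        return (pkru >> (pkey * 2 + 1)) & 1;  /* 1 => data writes disabled */
    }

leaf_pte_pkeys_check() combines these per-key bits with the page-fault error
code to decide whether the access is a protection-key violation.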
 xen/arch/x86/i387.c           |  2 +-
 xen/arch/x86/mm/guest_walk.c  | 73 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h |  2 ++
 xen/include/asm-x86/i387.h    |  1 +
 4 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index b661d39..83c8465 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -132,7 +132,7 @@ static inline uint64_t vcpu_xsave_mask(const struct vcpu *v)
 }
 
 /* Save x87 extended state */
-static inline void fpu_xsave(struct vcpu *v)
+void fpu_xsave(struct vcpu *v)
 {
     bool_t ok;
     uint64_t mask = vcpu_xsave_mask(v);
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 18d1acf..e79f72f 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -31,6 +31,8 @@ asm(".file \"" __OBJECT_FILE__ "\"");
 #include <xen/sched.h>
 #include <asm/page.h>
 #include <asm/guest_pt.h>
+#include <asm/xstate.h>
+#include <asm/i387.h>
 
 extern const uint32_t gw_page_flags[];
 #if GUEST_PAGING_LEVELS == CONFIG_PAGING_LEVELS
@@ -90,6 +92,61 @@ static uint32_t set_ad_bits(void *guest_p, void *walk_p, int set_dirty)
     return 0;
 }
 
+#if GUEST_PAGING_LEVELS >= CONFIG_PAGING_LEVELS
+bool_t leaf_pte_pkeys_check(struct vcpu *vcpu, uint32_t pfec,
+                uint32_t pte_access, uint32_t pte_pkeys)
+{
+    void *xsave_addr;
+    unsigned int pkru = 0;
+    bool_t pkru_ad, pkru_wd;
+
+    bool_t uf = !!(pfec & PFEC_user_mode);
+    bool_t wf = !!(pfec & PFEC_write_access);
+    bool_t ff = !!(pfec & PFEC_insn_fetch);
+    bool_t rsvdf = !!(pfec & PFEC_reserved_bit);
+    bool_t pkuf  = !!(pfec & PFEC_prot_key);
+
+    if ( !cpu_has_xsave || !pkuf || is_pv_vcpu(vcpu) )
+        return 0;
+
+    /* PKRU bits for protection key 0 are always zero. */
+    if ( likely(!pte_pkeys) )
+        return 0;
+
+    /* Update vcpu xsave area */
+    fpu_xsave(vcpu);
+    xsave_addr = get_xsave_addr(vcpu->arch.xsave_area, fls64(XSTATE_PKRU) - 1);
+    if ( xsave_addr )
+        memcpy(&pkru, xsave_addr, sizeof(pkru));
+
+    if ( unlikely(pkru) )
+    {
+        /*
+         * PKU: an additional mechanism by which paging controls access
+         * to user-mode addresses based on the value in the PKRU
+         * register. A fault is considered a PKU violation if all of
+         * the following conditions are true:
+         * 1. CR4.PKE = 1.
+         * 2. EFER.LMA = 1.
+         * 3. The page is present with no reserved-bit violations.
+         * 4. The access is not an instruction fetch.
+         * 5. The access is to a user page.
+         * 6. PKRU.AD = 1,
+         *    or the access is a data write and PKRU.WD = 1
+         *    and either CR0.WP = 1 or it is a user access.
+         */
+        pkru_ad = read_pkru_ad(pkru, pte_pkeys);
+        pkru_wd = read_pkru_wd(pkru, pte_pkeys);
+        if ( hvm_pku_enabled(vcpu) && hvm_long_mode_enabled(vcpu) &&
+             !rsvdf && !ff &&
+             (pkru_ad || (pkru_wd && wf && (hvm_wp_enabled(vcpu) || uf))) )
+            return 1;
+    }
+
+    return 0;
+}
+#endif
+
 /* Walk the guest pagetables, after the manner of a hardware walker. */
 /* Because the walk is essentially random, it can cause a deadlock 
  * warning in the p2m locking code. Highly unlikely this is an actual
@@ -106,6 +163,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
     guest_l3e_t *l3p = NULL;
     guest_l4e_t *l4p;
+    unsigned int pkeys;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
     bool_t smep = 0, smap = 0;
@@ -190,6 +248,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
         goto out;
     /* Get the l3e and check its flags*/
     gw->l3e = l3p[guest_l3_table_offset(va)];
+    pkeys = guest_l3e_get_pkeys(gw->l3e);
     gflags = guest_l3e_get_flags(gw->l3e) ^ iflags;
     if ( !(gflags & _PAGE_PRESENT) ) {
         rc |= _PAGE_PRESENT;
@@ -199,6 +258,9 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     
     pse1G = (gflags & _PAGE_PSE) && guest_supports_1G_superpages(v); 
 
+    if ( pse1G && leaf_pte_pkeys_check(v, pfec, gflags, pkeys) )
+        rc |= _PAGE_PKEY_BITS;
+
     if ( pse1G )
     {
         /* Generate a fake l1 table entry so callers don't all 
@@ -270,6 +332,12 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 
     pse2M = (gflags & _PAGE_PSE) && guest_supports_superpages(v); 
 
+#if GUEST_PAGING_LEVELS >= 4
+    pkeys = guest_l2e_get_pkeys(gw->l2e);
+    if ( pse2M && leaf_pte_pkeys_check(v, pfec, gflags, pkeys) )
+        rc |= _PAGE_PKEY_BITS;
+#endif
+
     if ( pse2M )
     {
         /* Special case: this guest VA is in a PSE superpage, so there's
@@ -330,6 +398,11 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
             goto out;
         }
         rc |= ((gflags & mflags) ^ mflags);
+#if GUEST_PAGING_LEVELS >= 4
+        pkeys = guest_l1e_get_pkeys(gw->l1e);
+        if ( leaf_pte_pkeys_check(v, pfec, gflags, pkeys) )
+            rc |= _PAGE_PKEY_BITS;
+#endif
     }
 
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index f80e143..79b3421 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -275,6 +275,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
+#define hvm_pku_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PKE))
 
 /* Can we use superpages in the HAP p2m table? */
 #define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index 7cfa215..c4aee70 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -30,6 +30,7 @@ struct ix87_env {
 
 void vcpu_restore_fpu_eager(struct vcpu *v);
 void vcpu_restore_fpu_lazy(struct vcpu *v);
+void fpu_xsave(struct vcpu *v);
 void vcpu_save_fpu(struct vcpu *v);
 void save_fpu_enable(void);
 
-- 
2.4.3

