|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH 5/6] x86/pagewalk: Support PKS
PKS is very similar to the existing PKU behaviour, except that it operates on
pagewalks for supervisor mappings rather than user mappings.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
---
xen/arch/x86/include/asm/guest_pt.h | 5 +++++
xen/arch/x86/include/asm/hvm/hvm.h | 3 +++
xen/arch/x86/mm/guest_walk.c | 9 +++++----
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/include/asm/guest_pt.h b/xen/arch/x86/include/asm/guest_pt.h
index 6647ccfb8520..6802db2a415a 100644
--- a/xen/arch/x86/include/asm/guest_pt.h
+++ b/xen/arch/x86/include/asm/guest_pt.h
@@ -282,6 +282,11 @@ static always_inline bool guest_pku_enabled(const struct vcpu *v)
return !is_pv_vcpu(v) && hvm_pku_enabled(v);
}
+static always_inline bool guest_pks_enabled(const struct vcpu *v)
+{
+ return !is_pv_vcpu(v) && hvm_pks_enabled(v);
+}
+
/* Helpers for identifying whether guest entries have reserved bits set. */
/* Bits reserved because of maxphysaddr, and (lack of) EFER.NX */
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index bd2cbb0e7baf..ffef7ed075a7 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -394,6 +394,8 @@ int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value);
((v)->arch.hvm.guest_efer & EFER_NXE)
#define hvm_pku_enabled(v) \
(hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE))
+#define hvm_pks_enabled(v) \
+ (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKS))
/* Can we use superpages in the HAP p2m table? */
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
@@ -868,6 +870,7 @@ static inline int hvm_vmtrace_get_option(
#define hvm_smap_enabled(v) ((void)(v), false)
#define hvm_nx_enabled(v) ((void)(v), false)
#define hvm_pku_enabled(v) ((void)(v), false)
+#define hvm_pks_enabled(v) ((void)(v), false)
#define arch_vcpu_block(v) ((void)(v))
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index dc8fdde0212e..8670d4990a11 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -406,16 +406,17 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
/*
* If all access checks are thus far ok, check Protection Key for 64bit
- * data accesses to user mappings.
+ * data accesses.
*
* N.B. In the case that the walk ended with a superpage, the fabricated
* gw->l1e contains the appropriate leaf pkey.
*/
- if ( (ar & _PAGE_USER) && !(walk & PFEC_insn_fetch) &&
- guest_pku_enabled(v) )
+ if ( !(walk & PFEC_insn_fetch) &&
+ ((ar & _PAGE_USER) ? guest_pku_enabled(v)
+ : guest_pks_enabled(v)) )
{
unsigned int pkey = guest_l1e_get_pkey(gw->l1e);
- unsigned int pkr = rdpkru();
+ unsigned int pkr = (ar & _PAGE_USER) ? rdpkru() : rdpkrs();
unsigned int pk_ar = pkr >> (pkey * PKEY_WIDTH);
if ( (pk_ar & PKEY_AD) ||
--
2.11.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |