[Xen-devel] [PATCH v4 09/10] x86/hvm: Add SMAP support to HVM guest
New Intel processors support SMAP (Supervisor Mode Access Prevention).
SMAP prevents supervisor-mode accesses to any linear address with
a valid translation for which the U/S flag (bit 2) is 1 in every
paging-structure entry controlling the translation for the linear
address.
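
For illustration only (not part of the patch), a minimal C sketch of the
condition SMAP enforces; the names below are hypothetical:

    #include <stdbool.h>

    /*
     * Illustrative sketch of the SMAP fault condition (hypothetical names,
     * not Xen code): a supervisor-mode data access faults when SMAP is
     * enabled, every paging-structure entry of the translation has U/S = 1,
     * and the access is not an explicit access made with EFLAGS.AC set.
     */
    static bool smap_faults(bool cr4_smap, bool user_in_every_entry,
                            bool implicit_access, bool eflags_ac)
    {
        if ( !cr4_smap || !user_in_every_entry )
            return false;
        /* Explicit supervisor accesses are permitted while EFLAGS.AC = 1. */
        return implicit_access || !eflags_ac;
    }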
Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 3 +++
xen/arch/x86/mm/guest_walk.c | 40 ++++++++++++++++++++++++++++++----------
xen/include/asm-x86/hvm/hvm.h | 16 ++++++++++++++++
3 files changed, 49 insertions(+), 10 deletions(-)
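
Illustrative note (not part of the patch): the guest_walk.c change below
reuses the trick already in place for SMEP: demand _PAGE_USER at every
level of the walk and invert the result afterwards, so a fault is reported
only when U/S was set in every entry. A standalone sketch with hypothetical
names:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_USER 0x4   /* stand-in for _PAGE_USER */

    /*
     * Sketch of the SMEP/SMAP walk trick (hypothetical, not guest_walk.c):
     * treat the walk as a user-mode one, then invert the _PAGE_USER result
     * so the access faults only if U/S was set at *every* level.
     */
    static bool walk_faults(const uint32_t *entries, unsigned int levels,
                            uint32_t mandatory, bool smep_or_smap)
    {
        uint32_t missing = 0;
        unsigned int i;

        if ( smep_or_smap )
            mandatory |= PAGE_USER;              /* pretend it is a user access */

        for ( i = 0; i < levels; i++ )
            missing |= mandatory & ~entries[i];  /* flags absent at this level */

        if ( smep_or_smap )
            missing ^= PAGE_USER;                /* fault iff U/S=1 everywhere */

        return missing != 0;
    }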
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b0da8e7..b52476d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3036,6 +3036,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
if ( (count == 0) && !cpu_has_smep )
*ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
+ if ( (count == 0) && !cpu_has_smap )
+ *ebx &= ~cpufeat_mask(X86_FEATURE_SMAP);
+
/* Don't expose MPX to hvm when VMX support is not available */
if ( (count == 0) &&
(!(vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) ||
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 70460b6..bb38fda 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -144,7 +144,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
guest_l4e_t *l4p;
#endif
uint32_t gflags, mflags, iflags, rc = 0;
- int smep;
+ bool_t smep = 0, smap = 0;
bool_t pse1G = 0, pse2M = 0;
p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
@@ -159,13 +159,33 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
mflags = mandatory_flags(v, pfec);
iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
- /* SMEP: kernel-mode instruction fetches from user-mode mappings
- * should fault. Unlike NX or invalid bits, we're looking for _all_
- * entries in the walk to have _PAGE_USER set, so we need to do the
- * whole walk as if it were a user-mode one and then invert the answer. */
- smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v)
- && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
- if ( smep )
+ if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
+ {
+ struct segment_register seg;
+ const struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+ hvm_get_segment_register(v, x86_seg_ss, &seg);
+
+ /* SMEP: kernel-mode instruction fetches from user-mode mappings
+ * should fault. Unlike NX or invalid bits, we're looking for _all_
+ * entries in the walk to have _PAGE_USER set, so we need to do the
+ * whole walk as if it were a user-mode one and then invert the answer. */
+ smep = hvm_smep_enabled(v) && (pfec & PFEC_insn_fetch);
+
+ /*
+ * SMAP: kernel-mode data accesses from user-mode mappings should fault.
+ * An access is treated as a SMAP violation if all of the following
+ * conditions hold:
+ * - X86_CR4_SMAP is set in CR4
+ * - A user page is accessed
+ * - CPL = 3 or X86_EFLAGS_AC is clear
+ * - The fault occurred in kernel (supervisor) mode
+ */
+ smap = hvm_smap_enabled(v) &&
+ ((seg.attr.fields.dpl == 3) || !(regs->eflags & X86_EFLAGS_AC));
+ }
+
+ if ( smep || smap )
mflags |= _PAGE_USER;
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
@@ -338,8 +358,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
set_ad:
#endif
- /* Now re-invert the user-mode requirement for SMEP. */
- if ( smep )
+ /* Now re-invert the user-mode requirement for SMEP and SMAP */
+ if ( smep || smap )
rc ^= _PAGE_USER;
/* Go back and set accessed and dirty bits only if the walk was a
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 99bfc4c..c33d270 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -257,6 +257,8 @@ int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);
(hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
(hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
+#define hvm_smap_enabled(v) \
+ (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
(!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
@@ -364,6 +366,19 @@ static inline bool_t hvm_vcpu_has_smep(void)
return !!(ebx & cpufeat_mask(X86_FEATURE_SMEP));
}
+static inline bool_t hvm_vcpu_has_smap(void)
+{
+ unsigned int eax, ebx;
+
+ hvm_cpuid(0x0, &eax, NULL, NULL, NULL);
+
+ if ( eax < 0x7 )
+ return 0;
+
+ hvm_cpuid(0x7, NULL, &ebx, NULL, NULL);
+ return !!(ebx & cpufeat_mask(X86_FEATURE_SMAP));
+}
+
/* These reserved bits in lower 32 remain 0 after any load of CR0 */
#define HVM_CR0_GUEST_RESERVED_BITS \
(~((unsigned long) \
@@ -384,6 +399,7 @@ static inline bool_t hvm_vcpu_has_smep(void)
X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
(hvm_vcpu_has_smep() ? X86_CR4_SMEP : 0) | \
+ (hvm_vcpu_has_smap() ? X86_CR4_SMAP : 0) | \
(cpu_has_fsgsbase ? X86_CR4_FSGSBASE : 0) | \
((nestedhvm_enabled((_v)->domain) && cpu_has_vmx)\
? X86_CR4_VMXE : 0) | \
--
1.8.3.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel