[Xen-changelog] [xen-4.1-testing] x86/hvm: add SMEP support to HVM guest



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1308826493 -3600
# Node ID 0300d7f10d42fe0f87395059811db95b2c089c54
# Parent  37ba0319e2cfe31a3ae108437e95f31eb6aea042
x86/hvm: add SMEP support to HVM guest

Intel's new CPUs support SMEP (Supervisor Mode Execution Protection).
SMEP prevents software operating with CPL < 3 (supervisor mode) from
fetching instructions from any linear address with a valid translation
for which the U/S flag (bit 2) is 1 in every paging-structure entry
controlling the translation for that linear address.

This patch adds SMEP support to HVM guests.

Signed-off-by: Yang Wei <wei.y.yang@xxxxxxxxx>
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
Signed-off-by: Li Xin <xin.li@xxxxxxxxx>
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
xen-unstable changeset:   23504:c34604d5a293
xen-unstable date:        Mon Jun 06 13:46:48 2011 +0100
---
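
The rule described above condenses to a single predicate, sketched here
for readers who want it spelled out.  This is an illustration only, not
Xen code; the names are hypothetical, and it assumes the caller already
knows whether every paging-structure entry in the walk had U/S set.

    /* Minimal sketch of the SMEP fault condition from the commit
     * message.  Illustrative names only, not Xen code. */
    #include <stdbool.h>

    static bool smep_blocks_fetch(bool smep_on, unsigned int cpl,
                                  bool insn_fetch, bool user_at_every_level)
    {
        /* SMEP only constrains instruction fetches performed in
         * supervisor mode (CPL < 3) through translations that are
         * user-accessible at every paging-structure level. */
        return smep_on && insn_fetch && (cpl < 3) && user_at_every_level;
    }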


diff -r 37ba0319e2cf -r 0300d7f10d42 tools/libxc/xc_cpufeature.h
--- a/tools/libxc/xc_cpufeature.h       Thu Jun 23 11:51:49 2011 +0100
+++ b/tools/libxc/xc_cpufeature.h       Thu Jun 23 11:54:53 2011 +0100
@@ -143,5 +143,6 @@
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE   (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP        (7*32+ 7) /* Supervisor Mode Execution Protection */
 
 #endif /* __LIBXC_CPUFEATURE_H */
diff -r 37ba0319e2cf -r 0300d7f10d42 tools/libxc/xc_cpuid_x86.c
--- a/tools/libxc/xc_cpuid_x86.c        Thu Jun 23 11:51:49 2011 +0100
+++ b/tools/libxc/xc_cpuid_x86.c        Thu Jun 23 11:54:53 2011 +0100
@@ -300,6 +300,14 @@
             clear_bit(X86_FEATURE_PAE, regs[3]);
         break;
 
+    case 0x00000007: /* Intel-defined CPU features */
+        if ( input[1] == 0 ) {
+            regs[1] &= bitmaskof(X86_FEATURE_SMEP);
+        } else
+            regs[1] = 0;
+        regs[0] = regs[2] = regs[3] = 0;
+        break;
+
     case 0x0000000d:
         xc_cpuid_config_xsave(xch, domid, xfeature_mask, input, regs);
         break;
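
For reference, the bit this hunk whitelists is CPUID.(EAX=7,ECX=0):EBX
bit 7, matching the (7*32+ 7) word/bit encoding above.  A host-side
probe, assuming a GCC-compatible toolchain recent enough to provide
__get_cpuid_count() in <cpuid.h>, might look like this sketch:

    /* Sketch: probe the host for SMEP via CPUID leaf 7, subleaf 0.
     * Assumes GCC/clang's <cpuid.h>; EBX bit 7 is SMEP. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        if ( __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) )
            printf("SMEP %ssupported\n", (ebx & (1u << 7)) ? "" : "not ");
        return 0;
    }
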
diff -r 37ba0319e2cf -r 0300d7f10d42 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jun 23 11:51:49 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jun 23 11:54:53 2011 +0100
@@ -1473,8 +1473,9 @@
     v->arch.hvm_vcpu.guest_cr[4] = value;
     hvm_update_guest_cr(v, 4);
 
-    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
-    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+    /* Modifying CR4.{PSE,PAE,PGE,SMEP} invalidates all TLB entries. */
+    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE |
+                             X86_CR4_PAE | X86_CR4_SMEP) )
         paging_update_paging_modes(v);
 
     return X86EMUL_OKAY;
@@ -2116,7 +2117,7 @@
 enum hvm_copy_result hvm_fetch_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
@@ -2142,7 +2143,7 @@
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
@@ -2212,6 +2213,10 @@
             *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
                      cpufeat_mask(X86_FEATURE_OSXSAVE) : 0;
         break;
+    case 0x7:
+        if ( (count == 0) && !cpu_has_smep )
+            *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
+        break;
     case 0xb:
         /* Fix the x2APIC identifier. */
         *edx = v->vcpu_id * 2;
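
Two details in this file are worth spelling out.  First,
hvm_fetch_from_guest_virt() and its _nofault variant emulate
instruction fetches in software, so they must tag the access with
PFEC_insn_fetch whenever SMEP (like NX) makes the fetch/data
distinction architecturally visible; otherwise the software page walk
could not apply the SMEP rule.  A condensed model of that tagging,
with an illustrative helper name (the bit positions follow the x86
page-fault error code layout):

    /* Sketch of the PFEC tagging done before an emulated fetch.
     * Helper name is illustrative, not Xen's. */
    #define PFEC_USER_MODE_BIT  (1u << 2)  /* U/S: access from CPL 3 */
    #define PFEC_INSN_FETCH_BIT (1u << 4)  /* I/D: instruction fetch */

    static unsigned int tag_insn_fetch(unsigned int pfec,
                                       int nx_on, int smep_on)
    {
        /* Either NX or SMEP makes fetches behave differently from
         * data reads, so either one forces the fetch tag. */
        if ( nx_on || smep_on )
            pfec |= PFEC_INSN_FETCH_BIT;
        return pfec;
    }

Second, the hvm_cpuid() hunk masks SMEP out of leaf 7 when the host
lacks it, so a guest can never see, and try to enable, a feature the
hardware cannot enforce.
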
diff -r 37ba0319e2cf -r 0300d7f10d42 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c      Thu Jun 23 11:51:49 2011 +0100
+++ b/xen/arch/x86/mm/guest_walk.c      Thu Jun 23 11:54:53 2011 +0100
@@ -132,7 +132,7 @@
     guest_l4e_t *l4p;
 #endif
     uint32_t gflags, mflags, iflags, rc = 0;
-    int pse;
+    int pse, smep;
 
     perfc_incr(guest_walk);
     memset(gw, 0, sizeof(*gw));
@@ -145,6 +145,15 @@
     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
 
+    /* SMEP: kernel-mode instruction fetches from user-mode mappings
+     * should fault.  Unlike NX or invalid bits, we're looking for _all_
+     * entries in the walk to have _PAGE_USER set, so we need to do the
+     * whole walk as if it were a user-mode one and then invert the answer. */
+    smep = (is_hvm_vcpu(v) && hvm_smep_enabled(v) 
+            && (pfec & PFEC_insn_fetch) && !(pfec & PFEC_user_mode) );
+    if ( smep )
+        mflags |= _PAGE_USER;
+
 #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
 
@@ -271,6 +280,10 @@
         rc |= ((gflags & mflags) ^ mflags);
     }
 
+    /* Now re-invert the user-mode requirement for SMEP. */
+    if ( smep ) 
+        rc ^= _PAGE_USER;
+
     /* Go back and set accessed and dirty bits only if the walk was a
      * success.  Although the PRMs say higher-level _PAGE_ACCESSED bits
      * get set whenever a lower-level PT is used, at least some hardware
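
The accumulate-then-invert trick in this hunk rewards a second look:
the walker ORs into rc the required flags each entry was missing, so
with _PAGE_USER added to mflags, the _PAGE_USER bit of rc ends up set
when some level was NOT user-accessible.  Flipping that bit afterwards
yields exactly the SMEP condition: every level was user-accessible.  A
stripped-down model covering only the U/S logic (illustrative names
and values):

    /* Sketch of the SMEP walk inversion: fault iff U/S is set at
     * EVERY level.  Models only the U/S bit of the real walker. */
    #define PAGE_USER_BIT 0x4u

    static unsigned int smep_walk_rc(const unsigned int *entries,
                                     unsigned int nlevels)
    {
        unsigned int mflags = PAGE_USER_BIT, rc = 0;

        for ( unsigned int i = 0; i < nlevels; i++ )
            rc |= (entries[i] & mflags) ^ mflags; /* required-but-missing */

        rc ^= PAGE_USER_BIT; /* now set iff NO level was missing U/S */
        return rc;           /* non-zero => SMEP fault */
    }

Note that the walker only takes this path for supervisor-mode
instruction fetches (the smep condition added earlier in the hunk), so
the inversion never affects ordinary data accesses.
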
diff -r 37ba0319e2cf -r 0300d7f10d42 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Thu Jun 23 11:51:49 2011 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Thu Jun 23 11:54:53 2011 +0100
@@ -178,6 +178,8 @@
     (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
 #define hvm_pae_enabled(v) \
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_smep_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
@@ -291,6 +293,7 @@
         X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
+        (cpu_has_smep ? X86_CR4_SMEP : 0) |             \
         (xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0))))
 
 /* These exceptions must always be intercepted. */
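
One closing note on the header changes: hvm_smep_enabled() deliberately
requires paging to be enabled, mirroring hvm_pae_enabled() above it,
and the reserved-bits mask only whitelists CR4.SMEP when the host
actually has the feature.  The enforcement pattern in the CR4-write
path is roughly the following (condensed, with illustrative names; the
real check lives in hvm_set_cr4(), not shown in this diff):

    /* Sketch: a guest CR4 write is refused if it sets any bit that
     * the reserved-bit mask does not whitelist.  Illustrative names. */
    static int guest_write_cr4(unsigned long value,
                               unsigned long reserved_bits)
    {
        if ( value & reserved_bits )
            return -1; /* the real code injects #GP into the guest */

        /* ... otherwise commit the value and update paging modes ... */
        return 0;
    }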

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog