
[Xen-changelog] [xen master] xen/sm{e,a}p: allow disabling sm{e,a}p for Xen itself



commit 493ab190e5b1432e23cd3d09ba28315f453239e4
Author:     He Chen <he.chen@xxxxxxxxxxxxxxx>
AuthorDate: Wed Oct 19 16:03:24 2016 +0800
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Oct 20 12:44:16 2016 +0100

    xen/sm{e,a}p: allow disabling sm{e,a}p for Xen itself
    
    SMEP/SMAP are security features which prevent the kernel from
    inadvertently executing/accessing user addresses; any such access
    results in a page fault.

    Previously, SMEP/SMAP were enabled (in CR4) for both Xen and HVM
    guests. With the SMEP/SMAP bits set in Xen's CR4, the checks are
    also enforced against 32-bit PV guests, which then suffer unexpected
    SMEP/SMAP page faults when the guest kernel accesses user addresses,
    even though SMEP/SMAP are nominally disabled for PV guests.
    
    This patch introduces a new value, "hvm", for the "sm{e,a}p" boot
    options. It disables SMEP/SMAP for the Xen hypervisor itself while
    still enabling them for HVM guests, so 32-bit PV guests no longer
    suffer these SMEP/SMAP faults. Users can thus choose whether to
    enable SMEP/SMAP for Xen itself, which is particularly relevant
    when running 32-bit PV guests.
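
    For example, with a GRUB2 menu entry that loads Xen via multiboot2
    (the file path below is illustrative, not part of this patch):

        multiboot2 /boot/xen.gz smep=hvm smap=hvm

    SMEP and SMAP then remain available to HVM guests while staying out
    of Xen's own CR4, avoiding the spurious faults described above.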
    
    Signed-off-by: He Chen <he.chen@xxxxxxxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Release-acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    [Fixed up command line docs]
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 docs/misc/xen-command-line.markdown | 14 ++++----
 xen/arch/x86/setup.c                | 68 +++++++++++++++++++++++++++++++------
 xen/arch/x86/x86_64/compat/entry.S  |  4 +--
 xen/arch/x86/x86_64/entry.S         |  4 +--
 xen/include/asm-x86/asm_defns.h     | 10 +++---
 xen/include/asm-x86/cpufeature.h    |  2 ++
 6 files changed, 77 insertions(+), 25 deletions(-)

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index cd9534b..87c3023 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1433,19 +1433,21 @@ enabling more sockets and cores to go into deeper sleep states.
 
 Set the serial transmit buffer size.
 
-### smep
-> `= <boolean>`
+### smap
+> `= <boolean> | hvm`
 
 > Default: `true`
 
-Flag to enable Supervisor Mode Execution Protection
+Flag to enable Supervisor Mode Access Prevention
+Use `smap=hvm` to allow SMAP use by HVM guests only.
 
-### smap
-> `= <boolean>`
+### smep
+> `= <boolean> | hvm`
 
 > Default: `true`
 
-Flag to enable Supervisor Mode Access Prevention
+Flag to enable Supervisor Mode Execution Protection
+Use `smep=hvm` to allow SMEP use by HVM guests only.
 
 ### snb\_igd\_quirk
 > `= <boolean> | cap | <integer>`
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 8ae897a..58b117d 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -61,14 +61,6 @@ boolean_param("nosmp", opt_nosmp);
 static unsigned int __initdata max_cpus;
 integer_param("maxcpus", max_cpus);
 
-/* smep: Enable/disable Supervisor Mode Execution Protection (default on). */
-static bool_t __initdata opt_smep = 1;
-boolean_param("smep", opt_smep);
-
-/* smap: Enable/disable Supervisor Mode Access Prevention (default on). */
-static bool_t __initdata opt_smap = 1;
-boolean_param("smap", opt_smap);
-
 unsigned long __read_mostly cr4_pv32_mask;
 
 /* Boot dom0 in pvh mode */
@@ -112,6 +104,58 @@ struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };
 
 unsigned long __read_mostly mmu_cr4_features = XEN_MINIMAL_CR4;
 
+/* smep: Enable/disable Supervisor Mode Execution Protection (default on). */
+#define SMEP_HVM_ONLY (-1)
+static s8 __initdata opt_smep = 1;
+static void __init parse_smep_param(char *s)
+{
+    if ( !*s )
+    {
+        opt_smep = 1;
+        return;
+    }
+
+    switch ( parse_bool(s) )
+    {
+    case 0:
+        opt_smep = 0;
+        return;
+    case 1:
+        opt_smep = 1;
+        return;
+    }
+
+    if ( !strcmp(s, "hvm") )
+        opt_smep = SMEP_HVM_ONLY;
+}
+custom_param("smep", parse_smep_param);
+
+/* smap: Enable/disable Supervisor Mode Access Prevention (default on). */
+#define SMAP_HVM_ONLY (-1)
+static s8 __initdata opt_smap = 1;
+static void __init parse_smap_param(char *s)
+{
+    if ( !*s )
+    {
+        opt_smap = 1;
+        return;
+    }
+
+    switch ( parse_bool(s) )
+    {
+    case 0:
+        opt_smap = 0;
+        return;
+    case 1:
+        opt_smap = 1;
+        return;
+    }
+
+    if ( !strcmp(s, "hvm") )
+        opt_smap = SMAP_HVM_ONLY;
+}
+custom_param("smap", parse_smap_param);
+
 bool_t __read_mostly acpi_disabled;
 bool_t __initdata acpi_force;
 static char __initdata acpi_param[10] = "";
@@ -1404,12 +1448,16 @@ void __init noreturn __start_xen(unsigned long mbi_p)
 
     if ( !opt_smep )
         setup_clear_cpu_cap(X86_FEATURE_SMEP);
-    if ( cpu_has_smep )
+    if ( cpu_has_smep && opt_smep != SMEP_HVM_ONLY )
+        __set_bit(X86_FEATURE_XEN_SMEP, boot_cpu_data.x86_capability);
+    if ( boot_cpu_has(X86_FEATURE_XEN_SMEP) )
         set_in_cr4(X86_CR4_SMEP);
 
     if ( !opt_smap )
         setup_clear_cpu_cap(X86_FEATURE_SMAP);
-    if ( cpu_has_smap )
+    if ( cpu_has_smap && opt_smap != SMAP_HVM_ONLY )
+        __set_bit(X86_FEATURE_XEN_SMAP, boot_cpu_data.x86_capability);
+    if ( boot_cpu_has(X86_FEATURE_XEN_SMAP) )
         set_in_cr4(X86_CR4_SMAP);
 
     cr4_pv32_mask = mmu_cr4_features & XEN_CR4_PV32_BITS;
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index cdec0f3..3bb6b61 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -137,10 +137,10 @@ ENTRY(compat_restore_all_guest)
         .section .altinstructions, "a"
         altinstruction_entry .Lcr4_orig, .Lcr4_orig, X86_FEATURE_ALWAYS, \
                              (.Lcr4_orig_end - .Lcr4_orig), 0
-        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_SMEP, \
+        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_XEN_SMEP, \
                              (.Lcr4_orig_end - .Lcr4_orig), \
                              (.Lcr4_alt_end - .Lcr4_alt)
-        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_SMAP, \
+        altinstruction_entry .Lcr4_orig, .Lcr4_alt, X86_FEATURE_XEN_SMAP, \
                              (.Lcr4_orig_end - .Lcr4_orig), \
                              (.Lcr4_alt_end - .Lcr4_alt)
         .popsection
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index b56c46c..66aefaa 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -417,11 +417,11 @@ handle_exception_saved:
 .Lcr4_pv32_alt_end:
         .section .altinstructions, "a"
         altinstruction_entry .Lcr4_pv32_orig, .Lcr4_pv32_alt, \
-                             X86_FEATURE_SMEP, \
+                             X86_FEATURE_XEN_SMEP, \
                              (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt), \
                              (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt)
         altinstruction_entry .Lcr4_pv32_orig, .Lcr4_pv32_alt, \
-                             X86_FEATURE_SMAP, \
+                             X86_FEATURE_XEN_SMAP, \
                              (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt), \
                              (.Lcr4_pv32_alt_end - .Lcr4_pv32_alt)
         .popsection
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index e36e78f..f1c6fa1 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -205,7 +205,7 @@ void ret_from_intr(void);
         .popsection;                                                   \
         .pushsection .altinstructions, "a";                            \
         altinstruction_entry 661b, 661b, X86_FEATURE_ALWAYS, 3, 0;     \
-        altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3;       \
+        altinstruction_entry 661b, 662b, X86_FEATURE_XEN_SMAP, 3, 3;       \
         .popsection
 
 #define ASM_STAC ASM_AC(STAC)
@@ -217,21 +217,21 @@ void ret_from_intr(void);
         668: call cr4_pv32_restore;                                \
         .section .altinstructions, "a";                            \
         altinstruction_entry 667b, 667b, X86_FEATURE_ALWAYS, 5, 0; \
-        altinstruction_entry 667b, 668b, X86_FEATURE_SMEP, 5, 5;   \
-        altinstruction_entry 667b, 668b, X86_FEATURE_SMAP, 5, 5;   \
+        altinstruction_entry 667b, 668b, X86_FEATURE_XEN_SMEP, 5, 5;   \
+        altinstruction_entry 667b, 668b, X86_FEATURE_XEN_SMAP, 5, 5;   \
         .popsection
 
 #else
 static always_inline void clac(void)
 {
     /* Note: a barrier is implicit in alternative() */
-    alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+    alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_XEN_SMAP);
 }
 
 static always_inline void stac(void)
 {
     /* Note: a barrier is implicit in alternative() */
-    alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+    alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_XEN_SMAP);
 }
 #endif
 
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 48f0507..c7c8520 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -17,6 +17,8 @@ XEN_CPUFEATURE(CPUID_FAULTING,  (FSCAPINTS+0)*32+ 6) /* cpuid faulting */
 XEN_CPUFEATURE(CLFLUSH_MONITOR, (FSCAPINTS+0)*32+ 7) /* clflush reqd with monitor */
 XEN_CPUFEATURE(APERFMPERF,      (FSCAPINTS+0)*32+ 8) /* APERFMPERF */
 XEN_CPUFEATURE(MFENCE_RDTSC,    (FSCAPINTS+0)*32+ 9) /* MFENCE synchronizes RDTSC */
+XEN_CPUFEATURE(XEN_SMEP,        (FSCAPINTS+0)*32+ 10) /* SMEP gets used by Xen itself */
+XEN_CPUFEATURE(XEN_SMAP,        (FSCAPINTS+0)*32+ 11) /* SMAP gets used by Xen itself */
 
 #define NCAPINTS (FSCAPINTS + 1) /* N 32-bit words worth of info */
 
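For readers skimming the setup.c hunk above: the options are now
tri-state rather than boolean. Below is a minimal standalone sketch of
the resulting semantics (illustration only, not Xen code; Xen's own
parse_bool() accepts further spellings such as "yes"/"no" and
"enable"/"disable"):

    #include <stdio.h>
    #include <string.h>

    #define HVM_ONLY (-1)  /* mirrors SM{E,A}P_HVM_ONLY in the patch */

    /* Map an option string to the tri-state value used in setup.c. */
    static int parse_opt(const char *s)
    {
        if ( !*s || !strcmp(s, "true") || !strcmp(s, "on") || !strcmp(s, "1") )
            return 1;            /* used by Xen and exposed to HVM */
        if ( !strcmp(s, "false") || !strcmp(s, "off") || !strcmp(s, "0") )
            return 0;            /* fully disabled */
        if ( !strcmp(s, "hvm") )
            return HVM_ONLY;     /* exposed to HVM guests only */
        return 1;                /* unrecognised: keep the default */
    }

    int main(void)
    {
        const char *inputs[] = { "", "off", "hvm" };

        for ( unsigned int i = 0; i < 3; i++ )
        {
            int v = parse_opt(inputs[i]);

            printf("smap=%-3s -> enumerated to guests: %s, used by Xen: %s\n",
                   inputs[i], v != 0 ? "yes" : "no", v == 1 ? "yes" : "no");
        }

        return 0;
    }

A value of 1 both keeps X86_FEATURE_SMAP visible and sets
X86_FEATURE_XEN_SMAP (so CR4.SMAP is set and the CLAC/STAC alternatives
are patched in); HVM_ONLY leaves the hardware feature enumerated but
keeps it out of Xen's own CR4.
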
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

