[Xen-changelog] [xen staging-4.10] x86: command line option to avoid use of secondary hyper-threads
commit 01eb262d5eb03617a99ad8109e4b7da7f839d7d2
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Jul 30 11:40:53 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Jul 30 11:40:53 2018 +0200

    x86: command line option to avoid use of secondary hyper-threads

    Shared resources (L1 cache and TLB in particular) present a risk of
    information leak via side channels. Provide a means to avoid use of
    hyperthreads in such cases.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: d8f974f1a646c0200b97ebcabb808324b288fadb
    master date: 2018-07-19 13:43:33 +0100
---
 docs/misc/xen-command-line.markdown |  7 +++++++
 xen/arch/x86/setup.c                |  8 +++++++-
 xen/arch/x86/sysctl.c               | 16 +++++++++++++++-
 xen/include/asm-x86/setup.h         |  2 ++
 4 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index cb81a9cbc4..dba37e2e53 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1717,6 +1717,13 @@ Use `smap=hvm` to allow SMAP use by HVM guests only.
 Flag to enable Supervisor Mode Execution Protection
 Use `smep=hvm` to allow SMEP use by HVM guests only.
 
+### smt (x86)
+> `= <boolean>`
+
+Default: `true`
+
+Control bring up of multiple hyper-threads per CPU core.
+
 ### snb\_igd\_quirk
 > `= <boolean> | cap | <integer>`
 
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 5c384d8a35..7cce57a274 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -62,6 +62,9 @@ boolean_param("nosmp", opt_nosmp);
 static unsigned int __initdata max_cpus;
 integer_param("maxcpus", max_cpus);
 
+int8_t __read_mostly opt_smt = -1;
+boolean_param("smt", opt_smt);
+
 /* opt_invpcid: If false, don't use INVPCID instruction even if available. */
 static bool __initdata opt_invpcid = true;
 boolean_param("invpcid", opt_invpcid);
@@ -1617,7 +1620,10 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             int ret = cpu_up(i);
             if ( ret != 0 )
                 printk("Failed to bring up CPU %u (error %d)\n", i, ret);
-            else if ( num_online_cpus() > max_cpus )
+            else if ( num_online_cpus() > max_cpus ||
+                      (!opt_smt &&
+                       cpu_data[i].compute_unit_id == INVALID_CUID &&
+                       cpumask_weight(per_cpu(cpu_sibling_mask, i)) > 1) )
             {
                 ret = cpu_down(i);
                 if ( !ret )
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 6ba823ca69..e726eee974 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -23,6 +23,7 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/processor.h>
+#include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/numa.h>
 #include <xen/nodemask.h>
@@ -48,14 +49,27 @@ static void l3_cache_get(void *arg)
 
 long cpu_up_helper(void *data)
 {
-    int cpu = (unsigned long)data;
+    unsigned int cpu = (unsigned long)data;
     int ret = cpu_up(cpu);
+
     if ( ret == -EBUSY )
     {
         /* On EBUSY, flush RCU work and have one more go. */
         rcu_barrier();
         ret = cpu_up(cpu);
     }
+
+    if ( !ret && !opt_smt &&
+         cpu_data[cpu].compute_unit_id == INVALID_CUID &&
+         cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
+    {
+        ret = cpu_down_helper(data);
+        if ( ret )
+            printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
+        else
+            ret = -EPERM;
+    }
+
     return ret;
 }
 
diff --git a/xen/include/asm-x86/setup.h b/xen/include/asm-x86/setup.h
index b68ec9de4d..ecfd0c2e7b 100644
--- a/xen/include/asm-x86/setup.h
+++ b/xen/include/asm-x86/setup.h
@@ -66,6 +66,8 @@ extern uint8_t kbd_shift_flags;
 extern unsigned long highmem_start;
 #endif
 
+extern int8_t opt_smt;
+
 #ifdef CONFIG_SHADOW_PAGING
 extern bool opt_dom0_shadow;
 #else
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.10
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
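
To make the new check easier to follow: a freshly onlined CPU is parked again when `smt=0` was given, when its core reports no compute unit (compute_unit_id == INVALID_CUID, i.e. the sibling map describes hyper-threads rather than AMD compute-unit siblings, which are full cores and deliberately left alone), and when its sibling mask already holds more than one CPU. Because CPUs are brought up one at a time, the first thread of each core sees a sibling weight of 1 and stays online; every later sibling of that core trips the condition. Below is a stand-alone sketch of just that predicate; it is not part of the patch, and struct cpu_topo, sibling_weight and the INVALID_CUID value are simplified stand-ins for Xen's cpu_data[] and cpu_sibling_mask internals:

/*
 * Stand-alone sketch (not from the patch) of the "park secondary
 * hyper-threads" predicate.  Types and sample data are simplified
 * stand-ins for Xen's cpu_data[] / cpu_sibling_mask; only the
 * condition itself mirrors the hypervisor code above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_CUID (~0u)         /* stand-in: no AMD compute unit */

struct cpu_topo {
    unsigned int compute_unit_id;  /* INVALID_CUID for SMT siblings */
    unsigned int sibling_weight;   /* cpumask_weight(cpu_sibling_mask) */
};

/*
 * 0 models "smt=0" on the command line.  The patch's default is -1
 * ("no explicit choice"), which is non-zero, so nothing gets parked
 * unless the option is given.
 */
static int8_t opt_smt = 0;

/* Mirrors the condition added to __start_xen() and cpu_up_helper(). */
static bool park_secondary_thread(const struct cpu_topo *c)
{
    return !opt_smt &&
           c->compute_unit_id == INVALID_CUID &&
           c->sibling_weight > 1;
}

int main(void)
{
    const struct cpu_topo cpus[] = {
        { INVALID_CUID, 1 },  /* first thread up on its core: keep */
        { INVALID_CUID, 2 },  /* second hyper-thread of the core: park */
        { 3, 2 },             /* AMD compute-unit sibling: keep */
    };

    for ( unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); ++i )
        printf("CPU%u: %s\n", i,
               park_secondary_thread(&cpus[i]) ? "park" : "keep");

    return 0;
}

The condition is applied in two places: at boot in __start_xen(), where the offending CPU is simply taken down again via cpu_down(), and at runtime in cpu_up_helper(), where an online request for a parked thread is rolled back and reported as -EPERM.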
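A usage note (the command names are the standard tools, not part of this patch): booting with `smt=0` on the Xen command line, or the `no-smt` form that Xen boolean options generally accept, brings up only one thread per core, while the default (opt_smt of -1) leaves all threads in use. Once parked, a thread cannot simply be onlined from dom0 either; for instance, an attempted `xen-hptool cpu-online <cpu>` goes through the sysctl path above and is refused with -EPERM.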