[Xen-changelog] [xen staging] x86/spec-ctrl: Mitigations for LazyFPU
commit 243435bf67e8159495194f623b9e4d8c90140384
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Thu Jun 7 17:00:37 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Wed Jun 13 21:45:17 2018 +0100

    x86/spec-ctrl: Mitigations for LazyFPU

    Intel Core processors since at least Nehalem speculate past #NM, which is
    the mechanism by which lazy FPU context switching is implemented.

    On affected processors, Xen must use fully eager FPU context switching to
    prevent guests from being able to read FPU state (SSE/AVX/etc) from
    previously scheduled vcpus.

    This is part of XSA-267 / CVE-2018-3665

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
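For background, here is a minimal sketch of the two context-switching
strategies the commit message contrasts. It is illustrative only: struct
fpu_area, fxrstor_area(), set_cr0_ts() and the ctxt_switch_*() functions are
hypothetical names, not Xen's internals. Lazy switching leaves the outgoing
vcpu's registers loaded and arms #NM via CR0.TS; a CPU that speculates past
#NM can transiently execute the incoming vcpu's FPU instructions against that
stale state, which is the leak that eager restoring closes.

    #include <stdint.h>

    /* Hypothetical per-vcpu FPU save area (FXSAVE image, simplified). */
    struct fpu_area {
        uint8_t regs[512] __attribute__((__aligned__(16)));
    };

    static inline void fxrstor_area(const struct fpu_area *a)
    {
        asm volatile ( "fxrstor %0" :: "m" (*a) );
    }

    static inline void set_cr0_ts(void)
    {
        unsigned long cr0;

        asm volatile ( "mov %%cr0, %0" : "=r" (cr0) );
        asm volatile ( "mov %0, %%cr0" :: "r" (cr0 | (1UL << 3)) ); /* TS */
    }

    /*
     * Lazy: keep the previous vcpu's registers loaded and arm #NM.
     * Architecturally safe, but a CPU speculating past #NM lets the next
     * vcpu transiently observe the stale SSE/AVX state.
     */
    static void ctxt_switch_lazy(void)
    {
        set_cr0_ts();   /* the real restore is deferred to the #NM handler */
    }

    /*
     * Eager: restore the incoming vcpu's state unconditionally, so no
     * stale value survives into its execution window.
     */
    static void ctxt_switch_eager(const struct fpu_area *next)
    {
        fxrstor_area(next);
    }

In the patch itself, the eager choice is latched per vcpu as
v->arch.fully_eager_fpu at vcpu_init_fpu() time (see the i387.c hunk below).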
---
 docs/misc/xen-command-line.markdown |  7 ++-
 xen/arch/x86/i387.c                 |  3 ++
 xen/arch/x86/spec_ctrl.c            | 96 +++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/spec_ctrl.h     |  1 +
 4 files changed, 102 insertions(+), 5 deletions(-)

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 8712a833a2..075e5ea159 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1758,7 +1758,7 @@ false disable the quirk workaround, which is also the default.
 
 ### spec-ctrl (x86)
 > `= List of [ <bool>, xen=<bool>, {pv,hvm,msr-sc,rsb}=<bool>,
->              bti-thunk=retpoline|lfence|jmp, {ibrs,ibpb,ssbd}=<bool> ]`
+>              bti-thunk=retpoline|lfence|jmp, {ibrs,ibpb,ssbd,eager-fpu}=<bool> ]`
 
 Controls for speculative execution sidechannel mitigations.  By default, Xen
 will pick the most appropriate mitigations based on compiled in support,
@@ -1808,6 +1808,11 @@ hardware, this is a global option applied at boot, and not virtualised for
 guest use.  On Intel hardware, the feature is virtualised for guests,
 independently of Xen's choice of setting.
 
+On all hardware, the `eager-fpu=` option can be used to force or prevent Xen
+from using fully eager FPU context switches.  This is currently implemented as
+a global control.  By default, Xen will choose to use fully eager context
+switches on hardware believed to speculate past #NM exceptions.
+
 ### sync\_console
 > `= <boolean>`
 
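For illustration, the syntax documented above combines with the existing
`spec-ctrl=` list option as follows (a usage sketch based on the documented
grammar; the negated form assumes the usual parse_boolean() handling, per the
parser hunk below):

    spec-ctrl=eager-fpu        force fully eager FPU context switching
    spec-ctrl=eager-fpu=0      force lazy switching (unsafe on CPUs that
                               speculate past #NM)
    spec-ctrl=no-eager-fpu     equivalent negated form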
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 50116d576f..dbdf1b42bd 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -15,6 +15,7 @@
 #include <asm/i387.h>
 #include <asm/xstate.h>
 #include <asm/asm_defns.h>
+#include <asm/spec_ctrl.h>
 
 /*******************************/
 /*     FPU Restore Functions   */
@@ -307,6 +308,8 @@ int vcpu_init_fpu(struct vcpu *v)
 {
     int rc;
 
+    v->arch.fully_eager_fpu = opt_eager_fpu;
+
     if ( (rc = xstate_alloc_save_area(v)) != 0 )
         return rc;
 
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index fd938c33a6..08e6784c4c 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -44,6 +44,7 @@ static enum ind_thunk {
 static int8_t __initdata opt_ibrs = -1;
 bool __read_mostly opt_ibpb = true;
 bool __read_mostly opt_ssbd = false;
+int8_t __read_mostly opt_eager_fpu = -1;
 
 bool __initdata bsp_delay_spec_ctrl;
 uint8_t __read_mostly default_xen_spec_ctrl;
@@ -130,6 +131,7 @@ static int __init parse_spec_ctrl(const char *s)
             opt_thunk = THUNK_JMP;
             opt_ibrs = 0;
             opt_ibpb = false;
+            opt_eager_fpu = 0;
         }
         else if ( val > 0 )
             rc = -EINVAL;
@@ -183,6 +185,8 @@ static int __init parse_spec_ctrl(const char *s)
             opt_ibpb = val;
         else if ( (val = parse_boolean("ssbd", s, ss)) >= 0 )
             opt_ssbd = val;
+        else if ( (val = parse_boolean("eager-fpu", s, ss)) >= 0 )
+            opt_eager_fpu = val;
         else
             rc = -EINVAL;
 
@@ -236,15 +240,19 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
      * Alternatives blocks for protecting against and/or virtualising
      * mitigation support for guests.
      */
-    printk("  Support for VMs: PV:%s%s%s, HVM:%s%s%s\n",
+    printk("  Support for VMs: PV:%s%s%s%s, HVM:%s%s%s%s\n",
            (boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
-            boot_cpu_has(X86_FEATURE_SC_RSB_PV)) ? "" : " None",
+            boot_cpu_has(X86_FEATURE_SC_RSB_PV) ||
+            opt_eager_fpu)                       ? ""               : " None",
            boot_cpu_has(X86_FEATURE_SC_MSR_PV)  ? " MSR_SPEC_CTRL" : "",
            boot_cpu_has(X86_FEATURE_SC_RSB_PV)  ? " RSB"           : "",
+           opt_eager_fpu                        ? " EAGER_FPU"     : "",
            (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
-            boot_cpu_has(X86_FEATURE_SC_RSB_HVM)) ? "" : " None",
+            boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
+            opt_eager_fpu)                       ? ""               : " None",
            boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "",
-           boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "");
+           boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB"           : "",
+           opt_eager_fpu                        ? " EAGER_FPU"     : "");
 
     printk("  XPTI (64-bit PV only): Dom0 %s, DomU %s\n",
            opt_xpti & OPT_XPTI_DOM0 ? "enabled" : "disabled",
@@ -334,6 +342,82 @@ static bool __init retpoline_safe(uint64_t caps)
     }
 }
 
+/* Calculate whether this CPU speculates past #NM */
+static bool __init should_use_eager_fpu(void)
+{
+    /*
+     * Assume all unrecognised processors are ok.  This is only known to
+     * affect Intel Family 6 processors.
+     */
+    if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+         boot_cpu_data.x86 != 6 )
+        return false;
+
+    switch ( boot_cpu_data.x86_model )
+    {
+        /*
+         * Core processors since at least Nehalem are vulnerable.
+         */
+    case 0x1e: /* Nehalem */
+    case 0x1f: /* Auburndale / Havendale */
+    case 0x1a: /* Nehalem EP */
+    case 0x2e: /* Nehalem EX */
+    case 0x25: /* Westmere */
+    case 0x2c: /* Westmere EP */
+    case 0x2f: /* Westmere EX */
+    case 0x2a: /* SandyBridge */
+    case 0x2d: /* SandyBridge EP/EX */
+    case 0x3a: /* IvyBridge */
+    case 0x3e: /* IvyBridge EP/EX */
+    case 0x3c: /* Haswell */
+    case 0x3f: /* Haswell EX/EP */
+    case 0x45: /* Haswell D */
+    case 0x46: /* Haswell H */
+    case 0x3d: /* Broadwell */
+    case 0x47: /* Broadwell H */
+    case 0x4f: /* Broadwell EP/EX */
+    case 0x56: /* Broadwell D */
+    case 0x4e: /* Skylake M */
+    case 0x55: /* Skylake X */
+    case 0x5e: /* Skylake D */
+    case 0x66: /* Cannonlake */
+    case 0x67: /* Cannonlake? */
+    case 0x8e: /* Kabylake M */
+    case 0x9e: /* Kabylake D */
+        return true;
+
+        /*
+         * Atom processors are not vulnerable.
+         */
+    case 0x1c: /* Pineview */
+    case 0x26: /* Lincroft */
+    case 0x27: /* Penwell */
+    case 0x35: /* Cloverview */
+    case 0x36: /* Cedarview */
+    case 0x37: /* Baytrail / Valleyview (Silvermont) */
+    case 0x4d: /* Avaton / Rangely (Silvermont) */
+    case 0x4c: /* Cherrytrail / Brasswell */
+    case 0x4a: /* Merrifield */
+    case 0x5a: /* Moorefield */
+    case 0x5c: /* Goldmont */
+    case 0x5f: /* Denverton */
+    case 0x7a: /* Gemini Lake */
+        return false;
+
+        /*
+         * Knights processors are not vulnerable.
+         */
+    case 0x57: /* Knights Landing */
+    case 0x85: /* Knights Mill */
+        return false;
+
+    default:
+        printk("Unrecognised CPU model %#x - assuming vulnerable to LazyFPU\n",
+               boot_cpu_data.x86_model);
+        return true;
+    }
+}
+
 #define OPT_XPTI_DEFAULT  0xff
 uint8_t __read_mostly opt_xpti = OPT_XPTI_DEFAULT;
 
@@ -530,6 +614,10 @@ void __init init_speculation_mitigations(void)
     if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
         opt_ibpb = false;
 
+    /* Check whether Eager FPU should be enabled by default. */
+    if ( opt_eager_fpu == -1 )
+        opt_eager_fpu = should_use_eager_fpu();
+
     /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. */
     init_shadow_spec_ctrl_state();
 
diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
index 91bed1b476..5b40afbab0 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -28,6 +28,7 @@ void init_speculation_mitigations(void);
 
 extern bool opt_ibpb;
 extern bool opt_ssbd;
+extern int8_t opt_eager_fpu;
 
 extern bool bsp_delay_spec_ctrl;
 extern uint8_t default_xen_spec_ctrl;
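One idiom worth noting from the hunks above: opt_eager_fpu is an int8_t
tristate rather than a bool, so parsing can record an explicit administrator
choice (0 or 1) while -1 means "no preference; compute a default from hardware
at init_speculation_mitigations() time". A self-contained sketch of that
pattern, with hypothetical names (opt_feature, feature_wanted_by_default):

    #include <stdint.h>
    #include <stdio.h>

    /* -1: unset; 0/1: explicitly forced from the command line. */
    static int8_t opt_feature = -1;

    /* Stand-in for hardware detection, cf. should_use_eager_fpu(). */
    static int feature_wanted_by_default(void)
    {
        return 1;
    }

    static void init_mitigations(void)
    {
        /* Resolve the tristate exactly once, after command line parsing. */
        if ( opt_feature == -1 )
            opt_feature = feature_wanted_by_default();
    }

    int main(void)
    {
        /* A "feature=0" command line option would set opt_feature = 0 here. */
        init_mitigations();
        printf("feature %s\n", opt_feature ? "enabled" : "disabled");
        return 0;
    }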
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog