[Xen-changelog] [xen stable-4.8] x86/spec-ctrl: Infrastructure to use VERW to flush pipeline buffers
commit a95a1035f4e8ca5a24b33d72d680d50fff246500
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Wed Dec 12 19:22:15 2018 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue May 14 15:48:54 2019 +0100

    x86/spec-ctrl: Infrastructure to use VERW to flush pipeline buffers

    Three synthetic features are introduced, as we need individual control of
    each, depending on circumstances.  A later change will enable them at
    appropriate points.

    The verw_sel field doesn't strictly need to live in struct cpu_info.  It
    lives there because there is a convenient hole it can fill, and it reduces
    the complexity of the SPEC_CTRL_EXIT_TO_{PV,HVM} assembly by avoiding the
    need for any temporary stack maintenance.

    This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130,
    CVE-2019-11091.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 548a932ac786d6bf3584e4b54f2ab993e1117710)
---
 xen/arch/x86/x86_64/asm-offsets.c   |  1 +
 xen/include/asm-x86/cpufeature.h    |  3 +++
 xen/include/asm-x86/current.h       |  1 +
 xen/include/asm-x86/spec_ctrl.h     | 34 ++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/spec_ctrl_asm.h | 10 ++++++++--
 5 files changed, 47 insertions(+), 2 deletions(-)
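As an aside (illustration only, not part of the patch): the CPUINFO_verw_sel constant emitted by the asm-offsets.c hunk below is simply offsetof(struct cpu_info, verw_sel), and the exit-to-guest paths run with %rsp pointing at guest_cpu_user_regs, the first member of struct cpu_info, which is why "verw CPUINFO_verw_sel(%rsp)" in the spec_ctrl_asm.h hunk reaches the selector without any temporary stack maintenance.  A minimal sketch of that relationship, using a stand-in cpu_user_regs and a trimmed struct cpu_info (only the members relevant here are mirrored from the current.h hunk):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the real struct cpu_user_regs; only its presence matters. */
struct cpu_user_regs { unsigned long r[24]; };

/* Trimmed copy of struct cpu_info, mirroring the current.h hunk below. */
struct cpu_info {
    struct cpu_user_regs guest_cpu_user_regs;
    unsigned int processor_id;
    unsigned int verw_sel;
    /* remaining members elided */
};

int main(void)
{
    /* asm-offsets.c effectively emits: #define CPUINFO_verw_sel <this> */
    printf("CPUINFO_verw_sel = %zu\n", offsetof(struct cpu_info, verw_sel));
    return 0;
}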
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index a1706739ff..4b7bc407f8 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -137,6 +137,7 @@ void __dummy__(void)
 
     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
     OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
+    OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
     OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 370e89e281..6057d95404 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -32,6 +32,9 @@ XEN_CPUFEATURE(SC_RSB_HVM,      (FSCAPINTS+0)*32+21) /* RSB overwrite needed for
 XEN_CPUFEATURE(NO_XPTI,         (FSCAPINTS+0)*32+22) /* XPTI mitigation not in use */
 XEN_CPUFEATURE(SC_MSR_IDLE,     (FSCAPINTS+0)*32+23) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */
 XEN_CPUFEATURE(XEN_LBR,         (FSCAPINTS+0)*32+24) /* Xen uses MSR_DEBUGCTL.LBR */
+XEN_CPUFEATURE(SC_VERW_PV,      (FSCAPINTS+0)*32+25) /* VERW used by Xen for PV */
+XEN_CPUFEATURE(SC_VERW_HVM,     (FSCAPINTS+0)*32+26) /* VERW used by Xen for HVM */
+XEN_CPUFEATURE(SC_VERW_IDLE,    (FSCAPINTS+0)*32+27) /* VERW used by Xen for idle */
 
 #define NCAPINTS (FSCAPINTS + 1) /* N 32-bit words worth of info */
 
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 1c2799d380..adecb5c975 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -39,6 +39,7 @@ struct vcpu;
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned int processor_id;
+    unsigned int verw_sel;
     struct vcpu *current_vcpu;
     unsigned long per_cpu_offset;
     unsigned long cr4;
diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
index 4983071901..98a0a504f6 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -53,6 +53,13 @@ static inline void init_shadow_spec_ctrl_state(void)
     info->shadow_spec_ctrl = 0;
     info->xen_spec_ctrl = default_xen_spec_ctrl;
     info->spec_ctrl_flags = default_spec_ctrl_flags;
+
+    /*
+     * For least latency, the VERW selector should be a writeable data
+     * descriptor resident in the cache.  __HYPERVISOR_DS32 shares a cache
+     * line with __HYPERVISOR_CS, so is expected to be very cache-hot.
+     */
+    info->verw_sel = __HYPERVISOR_DS32;
 }
 
 /* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
@@ -73,6 +80,22 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
     alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
                       "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
     barrier();
+
+    /*
+     * Microarchitectural Store Buffer Data Sampling:
+     *
+     * On vulnerable systems, store buffer entries are statically partitioned
+     * between active threads.  When entering idle, our store buffer entries
+     * are re-partitioned to allow the other threads to use them.
+     *
+     * Flush the buffers to ensure that no sensitive data of ours can be
+     * leaked by a sibling after it gets our store buffer entries.
+     *
+     * Note: VERW must be encoded with a memory operand, as it is only that
+     * form which causes a flush.
+     */
+    alternative_input(ASM_NOP8, "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,
+                      [sel] "m" (info->verw_sel));
 }
 
 /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
@@ -91,6 +114,17 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
     alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
                       "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
     barrier();
+
+    /*
+     * Microarchitectural Store Buffer Data Sampling:
+     *
+     * On vulnerable systems, store buffer entries are statically partitioned
+     * between active threads.  When exiting idle, the other threads store
+     * buffer entries are re-partitioned to give us some.
+     *
+     * We now have store buffer entries with stale data from sibling threads.
+     * A flush if necessary will be performed on the return to guest path.
+     */
 }
 
 #endif /* !__X86_SPEC_CTRL_H__ */
diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index 4d864eb4ec..560306f3ab 100644
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -247,12 +247,18 @@
 /* Use when exiting to PV guest context. */
 #define SPEC_CTRL_EXIT_TO_PV                                            \
     ALTERNATIVE __stringify(ASM_NOP24),                                 \
-        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV
+        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV;              \
+    ALTERNATIVE __stringify(ASM_NOP8),                                  \
+        __stringify(verw CPUINFO_verw_sel(%rsp)),                       \
+        X86_FEATURE_SC_VERW_PV
 
 /* Use when exiting to HVM guest context. */
 #define SPEC_CTRL_EXIT_TO_HVM                                           \
     ALTERNATIVE __stringify(ASM_NOP24),                                 \
-        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM
+        DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM;             \
+    ALTERNATIVE __stringify(ASM_NOP8),                                  \
+        __stringify(verw CPUINFO_verw_sel(%rsp)),                       \
+        X86_FEATURE_SC_VERW_HVM
 
 /*
  * Use in IST interrupt/exception context. May interrupt Xen or PV context.
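For illustration only (not Xen code): the memory-operand form of VERW that the alternative_input() call in spec_ctrl_enter_idle() and the SPEC_CTRL_EXIT_TO_{PV,HVM} blocks above emit.  The sketch below runs in user space and reads %ss as a stand-in for Xen's __HYPERVISOR_DS32; it demonstrates only the operand encoding, since, as the comment in spec_ctrl_enter_idle() notes, only the memory-operand form of VERW causes the flush.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t sel;

    /* Any loaded, writeable data selector will do for this illustration. */
    asm volatile ( "mov %%ss, %0" : "=r" (sel) );

    /*
     * VERW with a memory operand, matching the "m" constraint used by the
     * patch; VERW only alters ZF, hence the "cc" clobber.
     */
    asm volatile ( "verw %0" :: "m" (sel) : "cc" );

    printf("VERW executed on selector %#x\n", (unsigned int)sel);
    return 0;
}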
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.8

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog