[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] x86/spec-ctrl: Rework spec_ctrl_flags context switching
commit 5796912f7279d9348a3166655588d30eae9f72cc
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Jul 1 15:59:40 2022 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Jul 12 16:23:00 2022 +0100

    x86/spec-ctrl: Rework spec_ctrl_flags context switching

    We are shortly going to need to context switch new bits in both the vcpu
    and S3 paths.  Introduce SCF_IST_MASK and SCF_DOM_MASK, and rework
    d->arch.verw into d->arch.spec_ctrl_flags to accommodate.

    No functional change.  This is part of XSA-407.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/acpi/power.c                |  8 ++++----
 xen/arch/x86/domain.c                    |  8 ++++----
 xen/arch/x86/include/asm/domain.h        |  3 +--
 xen/arch/x86/include/asm/spec_ctrl.h     | 30 +++++++++++++++++++++++++++++-
 xen/arch/x86/include/asm/spec_ctrl_asm.h |  3 ---
 xen/arch/x86/spec_ctrl.c                 |  9 ++++++---
 6 files changed, 44 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index c4e7e86989..1bb4d78392 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -248,8 +248,8 @@ static int enter_state(u32 state)
         error = 0;

     ci = get_cpu_info();
-    /* Avoid NMI/#MC using MSR_SPEC_CTRL until we've reloaded microcode. */
-    ci->spec_ctrl_flags &= ~SCF_ist_wrmsr;
+    /* Avoid NMI/#MC using unsafe MSRs until we've reloaded microcode. */
+    ci->spec_ctrl_flags &= ~SCF_IST_MASK;

     ACPI_FLUSH_CPU_CACHE();

@@ -292,8 +292,8 @@ static int enter_state(u32 state)
     if ( !recheck_cpu_features(0) )
         panic("Missing previously available feature(s)\n");

-    /* Re-enabled default NMI/#MC use of MSR_SPEC_CTRL. */
-    ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
+    /* Re-enabled default NMI/#MC use of MSRs now microcode is loaded. */
+    ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_IST_MASK);

     if ( boot_cpu_has(X86_FEATURE_IBRSB) || boot_cpu_has(X86_FEATURE_IBRS) )
     {
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 408ee284ed..21dbf7b822 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2123,10 +2123,10 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
             }
         }

-        /* Update the top-of-stack block with the VERW disposition. */
-        info->spec_ctrl_flags &= ~SCF_verw;
-        if ( nextd->arch.verw )
-            info->spec_ctrl_flags |= SCF_verw;
+        /* Update the top-of-stack block with the new spec_ctrl settings. */
+        info->spec_ctrl_flags =
+            (info->spec_ctrl_flags       & ~SCF_DOM_MASK) |
+            (nextd->arch.spec_ctrl_flags &  SCF_DOM_MASK);
     }

     sched_context_switched(prev, next);
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index ad01ee68e1..4e59ca8c4e 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -324,8 +324,7 @@ struct arch_domain
     uint32_t pci_cf8;
     uint8_t cmos_idx;

-    /* Use VERW on return-to-guest for its flushing side effect. */
-    bool verw;
+    uint8_t spec_ctrl_flags; /* See SCF_DOM_MASK */

     union {
         struct pv_domain pv;
diff --git a/xen/arch/x86/include/asm/spec_ctrl.h b/xen/arch/x86/include/asm/spec_ctrl.h
index 7e83e0179f..3cd72e4030 100644
--- a/xen/arch/x86/include/asm/spec_ctrl.h
+++ b/xen/arch/x86/include/asm/spec_ctrl.h
@@ -20,12 +20,40 @@
 #ifndef __X86_SPEC_CTRL_H__
 #define __X86_SPEC_CTRL_H__

-/* Encoding of cpuinfo.spec_ctrl_flags */
+/*
+ * Encoding of:
+ *   cpuinfo.spec_ctrl_flags
+ *   default_spec_ctrl_flags
+ *   domain.spec_ctrl_flags
+ *
+ * Live settings are in the top-of-stack block, because they need to be
+ * accessable when XPTI is active.  Some settings are fixed from boot, some
+ * context switched per domain, and some inhibited in the S3 path.
+ */
 #define SCF_use_shadow (1 << 0)
 #define SCF_ist_wrmsr  (1 << 1)
 #define SCF_ist_rsb    (1 << 2)
 #define SCF_verw       (1 << 3)

+/*
+ * The IST paths (NMI/#MC) can interrupt any arbitrary context.  Some
+ * functionality requires updated microcode to work.
+ *
+ * On boot, this is easy; we load microcode before figuring out which
+ * speculative protections to apply.  However, on the S3 resume path, we must
+ * be able to disable the configured mitigations until microcode is reloaded.
+ *
+ * These are the controls to inhibit on the S3 resume path until microcode has
+ * been reloaded.
+ */
+#define SCF_IST_MASK (SCF_ist_wrmsr)
+
+/*
+ * Some speculative protections are per-domain.  These settings are merged
+ * into the top-of-stack block in the context switch path.
+ */
+#define SCF_DOM_MASK (SCF_verw)
+
 #ifndef __ASSEMBLY__

 #include <asm/alternative.h>
diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
index 5a590bac44..66b00d511f 100644
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -248,9 +248,6 @@

 /*
  * Use in IST interrupt/exception context.  May interrupt Xen or PV context.
- * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
- * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
- * been reloaded.
  */
 .macro SPEC_CTRL_ENTRY_FROM_INTR_IST
     /*
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 328862bdf5..97b0272ecc 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -1011,9 +1011,12 @@ void spec_ctrl_init_domain(struct domain *d)
 {
     bool pv = is_pv_domain(d);

-    d->arch.verw =
-        (pv ? opt_md_clear_pv : opt_md_clear_hvm) ||
-        (opt_fb_clear_mmio && is_iommu_enabled(d));
+    bool verw = ((pv ? opt_md_clear_pv : opt_md_clear_hvm) ||
+                 (opt_fb_clear_mmio && is_iommu_enabled(d)));
+
+    d->arch.spec_ctrl_flags =
+        (verw ? SCF_verw : 0) |
+        0;
 }

 void __init init_speculation_mitigations(void)
--
generated by git-patchbot for /home/xen/git/xen.git#staging
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.