[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 03/10] x86/spec_ctrl: Merge bti_ist_info and use_shadow_spec_ctrl into spec_ctrl_flags
All 3 bits of information here are control flags for the entry/exit code behaviour. Treat them as such, rather than having two different variables. Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> --- CC: Jan Beulich <JBeulich@xxxxxxxx> CC: Wei Liu <wei.liu2@xxxxxxxxxx> CC: Roger Pau Monné <roger.pau@xxxxxxxxxx> CC: Juergen Gross <jgross@xxxxxxxx> --- xen/arch/x86/acpi/power.c | 4 ++-- xen/arch/x86/spec_ctrl.c | 10 ++++---- xen/arch/x86/x86_64/asm-offsets.c | 3 +-- xen/include/asm-x86/current.h | 3 +-- xen/include/asm-x86/spec_ctrl.h | 10 ++++---- xen/include/asm-x86/spec_ctrl_asm.h | 46 ++++++++++++++++++++----------------- 6 files changed, 40 insertions(+), 36 deletions(-) diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c index bb0d095..a704c7c 100644 --- a/xen/arch/x86/acpi/power.c +++ b/xen/arch/x86/acpi/power.c @@ -215,7 +215,7 @@ static int enter_state(u32 state) ci = get_cpu_info(); spec_ctrl_enter_idle(ci); /* Avoid NMI/#MC using MSR_SPEC_CTRL until we've reloaded microcode. */ - ci->bti_ist_info = 0; + ci->spec_ctrl_flags &= ~SCF_ist_wrmsr; ACPI_FLUSH_CPU_CACHE(); @@ -259,7 +259,7 @@ static int enter_state(u32 state) panic("Missing previously available feature(s)."); /* Re-enabled default NMI/#MC use of MSR_SPEC_CTRL. 
*/ - ci->bti_ist_info = default_bti_ist_info; + ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr); spec_ctrl_exit_idle(ci); done: diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c index 6633c64..1ad3ff5 100644 --- a/xen/arch/x86/spec_ctrl.c +++ b/xen/arch/x86/spec_ctrl.c @@ -39,7 +39,7 @@ static bool __initdata opt_rsb_native = true; static bool __initdata opt_rsb_vmexit = true; bool __read_mostly opt_ibpb = true; uint8_t __read_mostly default_xen_spec_ctrl; -uint8_t __read_mostly default_bti_ist_info; +uint8_t __read_mostly default_spec_ctrl_flags; static int __init parse_bti(const char *s) { @@ -374,7 +374,7 @@ void __init init_speculation_mitigations(void) else setup_force_cpu_cap(X86_FEATURE_XEN_IBRS_CLEAR); - default_bti_ist_info |= BTI_IST_WRMSR; + default_spec_ctrl_flags |= SCF_ist_wrmsr; } /* @@ -393,7 +393,7 @@ void __init init_speculation_mitigations(void) if ( opt_rsb_native ) { setup_force_cpu_cap(X86_FEATURE_RSB_NATIVE); - default_bti_ist_info |= BTI_IST_RSB; + default_spec_ctrl_flags |= SCF_ist_rsb; } /* @@ -407,7 +407,7 @@ void __init init_speculation_mitigations(void) if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) ) opt_ibpb = false; - /* (Re)init BSP state now that default_bti_ist_info has been calculated. */ + /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. */ init_shadow_spec_ctrl_state(); xpti_init_default(false); @@ -421,6 +421,8 @@ void __init init_speculation_mitigations(void) static void __init __maybe_unused build_assertions(void) { + /* The optimised assembly relies on this alias. 
*/ + BUILD_BUG_ON(SCF_use_shadow != 1); } /* diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c index f80d3b7..5957c76 100644 --- a/xen/arch/x86/x86_64/asm-offsets.c +++ b/xen/arch/x86/x86_64/asm-offsets.c @@ -135,8 +135,7 @@ void __dummy__(void) OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3); OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl); OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl); - OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl); - OFFSET(CPUINFO_bti_ist_info, struct cpu_info, bti_ist_info); + OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags); OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed); OFFSET(CPUINFO_use_pv_cr3, struct cpu_info, use_pv_cr3); DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info)); diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h index 200e935..5bd64b2 100644 --- a/xen/include/asm-x86/current.h +++ b/xen/include/asm-x86/current.h @@ -55,8 +55,7 @@ struct cpu_info { /* See asm-x86/spec_ctrl_asm.h for usage. 
*/ unsigned int shadow_spec_ctrl; uint8_t xen_spec_ctrl; - bool use_shadow_spec_ctrl; - uint8_t bti_ist_info; + uint8_t spec_ctrl_flags; /* * The following field controls copying of the L4 page table of 64-bit diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h index 0c7663a..d5bd4df 100644 --- a/xen/include/asm-x86/spec_ctrl.h +++ b/xen/include/asm-x86/spec_ctrl.h @@ -28,7 +28,7 @@ void init_speculation_mitigations(void); extern bool opt_ibpb; extern uint8_t default_xen_spec_ctrl; -extern uint8_t default_bti_ist_info; +extern uint8_t default_spec_ctrl_flags; extern uint8_t opt_xpti; #define OPT_XPTI_DOM0 0x01 @@ -38,9 +38,9 @@ static inline void init_shadow_spec_ctrl_state(void) { struct cpu_info *info = get_cpu_info(); - info->shadow_spec_ctrl = info->use_shadow_spec_ctrl = 0; + info->shadow_spec_ctrl = 0; info->xen_spec_ctrl = default_xen_spec_ctrl; - info->bti_ist_info = default_bti_ist_info; + info->spec_ctrl_flags = default_spec_ctrl_flags; } /* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */ @@ -54,7 +54,7 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info) */ info->shadow_spec_ctrl = val; barrier(); - info->use_shadow_spec_ctrl = true; + info->spec_ctrl_flags |= SCF_use_shadow; barrier(); asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET) :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" ); @@ -69,7 +69,7 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info) * Disable shadowing before updating the MSR. There are no SMP issues * here; only local processor ordering concerns. 
*/ - info->use_shadow_spec_ctrl = false; + info->spec_ctrl_flags &= ~SCF_use_shadow; barrier(); asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET) :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" ); diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h index e8e8f9a..97da08b 100644 --- a/xen/include/asm-x86/spec_ctrl_asm.h +++ b/xen/include/asm-x86/spec_ctrl_asm.h @@ -20,9 +20,10 @@ #ifndef __X86_SPEC_CTRL_ASM_H__ #define __X86_SPEC_CTRL_ASM_H__ -/* Encoding of the bottom bits in cpuinfo.bti_ist_info */ -#define BTI_IST_WRMSR (1 << 1) -#define BTI_IST_RSB (1 << 2) +/* Encoding of cpuinfo.spec_ctrl_flags */ +#define SCF_use_shadow (1 << 0) +#define SCF_ist_wrmsr (1 << 1) +#define SCF_ist_rsb (1 << 2) #ifdef __ASSEMBLY__ #include <asm/msr-index.h> @@ -49,20 +50,20 @@ * after VMEXIT. The VMEXIT-specific code reads MSR_SPEC_CTRL and updates * current before loading Xen's MSR_SPEC_CTRL setting. * - * Factor 2 is harder. We maintain a shadow_spec_ctrl value, and - * use_shadow_spec_ctrl boolean per cpu. The synchronous use is: + * Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow + * boolean in the per cpu spec_ctrl_flags. The synchronous use is: * * 1) Store guest value in shadow_spec_ctrl - * 2) Set use_shadow_spec_ctrl boolean + * 2) Set the use_shadow boolean * 3) Load guest value into MSR_SPEC_CTRL * 4) Exit to guest * 5) Entry from guest - * 6) Clear use_shadow_spec_ctrl boolean + * 6) Clear the use_shadow boolean * 7) Load Xen's value into MSR_SPEC_CTRL * * The asynchronous use for interrupts/exceptions is: * - Set/clear IBRS on entry to Xen - * - On exit to Xen, check use_shadow_spec_ctrl + * - On exit to Xen, check use_shadow * - If set, load shadow_spec_ctrl * * Therefore, an interrupt/exception which hits the synchronous path between @@ -134,7 +135,7 @@ xor %edx, %edx /* Clear SPEC_CTRL shadowing *before* loading Xen's value. 
*/ - movb %dl, CPUINFO_use_shadow_spec_ctrl(%rsp) + andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) /* Load Xen's intended value. */ mov $\ibrs_val, %eax @@ -160,12 +161,14 @@ * block so calculate the position directly. */ .if \maybexen + xor %eax, %eax /* Branchless `if ( !xen ) clear_shadowing` */ testb $3, UREGS_cs(%rsp) - setz %al - and %al, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%r14) + setnz %al + not %eax + and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14) .else - movb %dl, CPUINFO_use_shadow_spec_ctrl(%rsp) + andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) .endif /* Load Xen's intended value. */ @@ -184,8 +187,8 @@ */ xor %edx, %edx - cmpb %dl, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%rbx) - je .L\@_skip + testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx) + jz .L\@_skip mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%rbx), %eax mov $MSR_SPEC_CTRL, %ecx @@ -206,7 +209,7 @@ mov %eax, CPUINFO_shadow_spec_ctrl(%rsp) /* Set SPEC_CTRL shadowing *before* loading the guest value. */ - movb $1, CPUINFO_use_shadow_spec_ctrl(%rsp) + orb $SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp) mov $MSR_SPEC_CTRL, %ecx xor %edx, %edx @@ -265,22 +268,23 @@ * This is logical merge of DO_OVERWRITE_RSB and DO_SPEC_CTRL_ENTRY * maybexen=1, but with conditionals rather than alternatives. */ - movzbl STACK_CPUINFO_FIELD(bti_ist_info)(%r14), %eax + movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %eax - testb $BTI_IST_RSB, %al + test $SCF_ist_rsb, %al jz .L\@_skip_rsb DO_OVERWRITE_RSB tmp=rdx /* Clobbers %rcx/%rdx */ .L\@_skip_rsb: - testb $BTI_IST_WRMSR, %al + test $SCF_ist_wrmsr, %al jz .L\@_skip_wrmsr xor %edx, %edx testb $3, UREGS_cs(%rsp) - setz %dl - and %dl, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%r14) + setnz %dl + not %edx + and %dl, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14) /* Load Xen's intended value. 
*/ mov $MSR_SPEC_CTRL, %ecx @@ -307,7 +311,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise): * Requires %rbx=stack_end * Clobbers %rax, %rcx, %rdx */ - testb $BTI_IST_WRMSR, STACK_CPUINFO_FIELD(bti_ist_info)(%rbx) + testb $SCF_ist_wrmsr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx) jz .L\@_skip DO_SPEC_CTRL_EXIT_TO_XEN -- 2.1.4 _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxxx https://lists.xenproject.org/mailman/listinfo/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.