[Xen-changelog] [xen staging-4.8] x86/spec_ctrl: Merge bti_ist_info and use_shadow_spec_ctrl into spec_ctrl_flags
commit b0ea18ed5be3ad89a54da7f5ed15584cb315cd5d
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Tue May 29 10:05:07 2018 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue May 29 10:05:07 2018 +0200

    x86/spec_ctrl: Merge bti_ist_info and use_shadow_spec_ctrl into spec_ctrl_flags

    All 3 bits of information here are control flags for the entry/exit code
    behaviour. Treat them as such, rather than having two different variables.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    master commit: 5262ba2e7799001402dfe139ff944e035dfff928
    master date: 2018-05-16 12:19:10 +0100
---
 xen/arch/x86/acpi/power.c           |  4 +--
 xen/arch/x86/spec_ctrl.c            | 10 ++++---
 xen/arch/x86/x86_64/asm-offsets.c   |  3 +--
 xen/include/asm-x86/current.h       |  3 +--
 xen/include/asm-x86/nops.h          |  5 ++--
 xen/include/asm-x86/spec_ctrl.h     | 10 +++----
 xen/include/asm-x86/spec_ctrl_asm.h | 52 ++++++++++++++++++++-----------------
 7 files changed, 45 insertions(+), 42 deletions(-)

diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 6fc32e8694..733379e7d6 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -216,7 +216,7 @@ static int enter_state(u32 state)
     ci = get_cpu_info();
     spec_ctrl_enter_idle(ci);
     /* Avoid NMI/#MC using MSR_SPEC_CTRL until we've reloaded microcode. */
-    ci->bti_ist_info = 0;
+    ci->spec_ctrl_flags &= ~SCF_ist_wrmsr;

     ACPI_FLUSH_CPU_CACHE();

@@ -257,7 +257,7 @@ static int enter_state(u32 state)
     microcode_resume_cpu(0);

     /* Re-enabled default NMI/#MC use of MSR_SPEC_CTRL. */
-    ci->bti_ist_info = default_bti_ist_info;
+    ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
     spec_ctrl_exit_idle(ci);

 done:
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 114352115a..2d69910c0f 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -39,7 +39,7 @@ static bool __initdata opt_rsb_native = true;
 static bool __initdata opt_rsb_vmexit = true;
 bool __read_mostly opt_ibpb = true;
 uint8_t __read_mostly default_xen_spec_ctrl;
-uint8_t __read_mostly default_bti_ist_info;
+uint8_t __read_mostly default_spec_ctrl_flags;

 static int __init parse_bti(const char *s)
 {
@@ -293,7 +293,7 @@ void __init init_speculation_mitigations(void)
         else
             setup_force_cpu_cap(X86_FEATURE_XEN_IBRS_CLEAR);

-        default_bti_ist_info |= BTI_IST_WRMSR;
+        default_spec_ctrl_flags |= SCF_ist_wrmsr;
     }

     /*
@@ -312,7 +312,7 @@ void __init init_speculation_mitigations(void)
     if ( opt_rsb_native )
     {
         setup_force_cpu_cap(X86_FEATURE_RSB_NATIVE);
-        default_bti_ist_info |= BTI_IST_RSB;
+        default_spec_ctrl_flags |= SCF_ist_rsb;
     }

     /*
@@ -326,7 +326,7 @@ void __init init_speculation_mitigations(void)
     if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
         opt_ibpb = false;

-    /* (Re)init BSP state now that default_bti_ist_info has been calculated. */
+    /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. */
     init_shadow_spec_ctrl_state();

     print_details(thunk, caps);
@@ -334,6 +334,8 @@ static void __init __maybe_unused build_assertions(void)
 {
+    /* The optimised assembly relies on this alias. */
+    BUILD_BUG_ON(SCF_use_shadow != 1);
 }

 /*
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index e0aff2c25d..d939a13c11 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -143,8 +143,7 @@ void __dummy__(void)
     OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3);
     OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
     OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl);
-    OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl);
-    OFFSET(CPUINFO_bti_ist_info, struct cpu_info, bti_ist_info);
+    OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();

diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index fc0a2fb790..43aac0b8b6 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -58,8 +58,7 @@ struct cpu_info {
     /* See asm-x86/spec_ctrl_asm.h for usage. */
     unsigned int shadow_spec_ctrl;
     uint8_t xen_spec_ctrl;
-    bool use_shadow_spec_ctrl;
-    uint8_t bti_ist_info;
+    uint8_t spec_ctrl_flags;

     unsigned long __pad;
     /* get_stack_bottom() must be 16-byte aligned */
diff --git a/xen/include/asm-x86/nops.h b/xen/include/asm-x86/nops.h
index f00bd16a70..cab2badd5d 100644
--- a/xen/include/asm-x86/nops.h
+++ b/xen/include/asm-x86/nops.h
@@ -64,10 +64,9 @@
 #define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)

 #define ASM_NOP17 ASM_NOP8; ASM_NOP7; ASM_NOP2
-#define ASM_NOP21 ASM_NOP8; ASM_NOP8; ASM_NOP5
+#define ASM_NOP22 ASM_NOP8; ASM_NOP8; ASM_NOP6
 #define ASM_NOP24 ASM_NOP8; ASM_NOP8; ASM_NOP8
-#define ASM_NOP29 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP5
-#define ASM_NOP32 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8
+#define ASM_NOP33 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP7; ASM_NOP2
 #define ASM_NOP40 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8

 #define ASM_NOP_MAX 8
diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
index 5e4fc84aec..059e29116f 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -28,15 +28,15 @@ void init_speculation_mitigations(void);

 extern bool opt_ibpb;

 extern uint8_t default_xen_spec_ctrl;
-extern uint8_t default_bti_ist_info;
+extern uint8_t default_spec_ctrl_flags;

 static inline void init_shadow_spec_ctrl_state(void)
 {
     struct cpu_info *info = get_cpu_info();

-    info->shadow_spec_ctrl = info->use_shadow_spec_ctrl = 0;
+    info->shadow_spec_ctrl = 0;
     info->xen_spec_ctrl = default_xen_spec_ctrl;
-    info->bti_ist_info = default_bti_ist_info;
+    info->spec_ctrl_flags = default_spec_ctrl_flags;
 }

 /* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
@@ -50,7 +50,7 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
      */
     info->shadow_spec_ctrl = val;
     barrier();
-    info->use_shadow_spec_ctrl = true;
+    info->spec_ctrl_flags |= SCF_use_shadow;
     barrier();
     asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
                    :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
@@ -65,7 +65,7 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
      * Disable shadowing before updating the MSR. There are no SMP issues
      * here; only local processor ordering concerns.
      */
-    info->use_shadow_spec_ctrl = false;
+    info->spec_ctrl_flags &= ~SCF_use_shadow;
     barrier();
     asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
                    :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index 9c169454ef..582403a9d8 100644
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -20,9 +20,10 @@
 #ifndef __X86_SPEC_CTRL_ASM_H__
 #define __X86_SPEC_CTRL_ASM_H__

-/* Encoding of the bottom bits in cpuinfo.bti_ist_info */
-#define BTI_IST_WRMSR (1 << 1)
-#define BTI_IST_RSB   (1 << 2)
+/* Encoding of cpuinfo.spec_ctrl_flags */
+#define SCF_use_shadow (1 << 0)
+#define SCF_ist_wrmsr  (1 << 1)
+#define SCF_ist_rsb    (1 << 2)

 #ifdef __ASSEMBLY__
 #include <asm/msr-index.h>
@@ -49,20 +50,20 @@
  * after VMEXIT. The VMEXIT-specific code reads MSR_SPEC_CTRL and updates
  * current before loading Xen's MSR_SPEC_CTRL setting.
  *
- * Factor 2 is harder. We maintain a shadow_spec_ctrl value, and
- * use_shadow_spec_ctrl boolean per cpu. The synchronous use is:
+ * Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow
+ * boolean in the per cpu spec_ctrl_flags. The synchronous use is:
 *
 *  1) Store guest value in shadow_spec_ctrl
- *  2) Set use_shadow_spec_ctrl boolean
+ *  2) Set the use_shadow boolean
 *  3) Load guest value into MSR_SPEC_CTRL
 *  4) Exit to guest
 *  5) Entry from guest
- *  6) Clear use_shadow_spec_ctrl boolean
+ *  6) Clear the use_shadow boolean
 *  7) Load Xen's value into MSR_SPEC_CTRL
 *
 * The asynchronous use for interrupts/exceptions is:
 *  - Set/clear IBRS on entry to Xen
- *  - On exit to Xen, check use_shadow_spec_ctrl
+ *  - On exit to Xen, check use_shadow
 *  - If set, load shadow_spec_ctrl
 *
 * Therefore, an interrupt/exception which hits the synchronous path between
@@ -133,7 +134,7 @@
     xor %edx, %edx

     /* Clear SPEC_CTRL shadowing *before* loading Xen's value. */
-    movb %dl, CPUINFO_use_shadow_spec_ctrl(%rsp)
+    andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)

     /* Load Xen's intended value. */
     mov $\ibrs_val, %eax
@@ -159,12 +160,14 @@
      * block so calculate the position directly.
      */
     .if \maybexen
+        xor %eax, %eax
         /* Branchless `if ( !xen ) clear_shadowing` */
         testb $3, UREGS_cs(%rsp)
-        setz %al
-        and %al, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%r14)
+        setnz %al
+        not %eax
+        and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
     .else
-        movb %dl, CPUINFO_use_shadow_spec_ctrl(%rsp)
+        andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
     .endif

     /* Load Xen's intended value. */
@@ -183,8 +186,8 @@
      */
     xor %edx, %edx

-    cmpb %dl, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%rbx)
-    je .L\@_skip
+    testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx)
+    jz .L\@_skip

     mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%rbx), %eax
     mov $MSR_SPEC_CTRL, %ecx
@@ -205,7 +208,7 @@
     mov %eax, CPUINFO_shadow_spec_ctrl(%rsp)

     /* Set SPEC_CTRL shadowing *before* loading the guest value. */
-    movb $1, CPUINFO_use_shadow_spec_ctrl(%rsp)
+    orb $SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)

     mov $MSR_SPEC_CTRL, %ecx
     xor %edx, %edx
@@ -216,7 +219,7 @@
 #define SPEC_CTRL_ENTRY_FROM_VMEXIT                                     \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
         DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT;                       \
-    ALTERNATIVE_2 __stringify(ASM_NOP32),                               \
+    ALTERNATIVE_2 __stringify(ASM_NOP33),                               \
         __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT                      \
                     ibrs_val=SPEC_CTRL_IBRS),                           \
         X86_FEATURE_XEN_IBRS_SET,                                       \
@@ -228,7 +231,7 @@
 #define SPEC_CTRL_ENTRY_FROM_PV                                         \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
         DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE;                       \
-    ALTERNATIVE_2 __stringify(ASM_NOP21),                               \
+    ALTERNATIVE_2 __stringify(ASM_NOP22),                               \
         __stringify(DO_SPEC_CTRL_ENTRY maybexen=0                       \
                     ibrs_val=SPEC_CTRL_IBRS),                           \
         X86_FEATURE_XEN_IBRS_SET,                                       \
@@ -239,7 +242,7 @@
 #define SPEC_CTRL_ENTRY_FROM_INTR                                       \
     ALTERNATIVE __stringify(ASM_NOP40),                                 \
         DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE;                       \
-    ALTERNATIVE_2 __stringify(ASM_NOP29),                               \
+    ALTERNATIVE_2 __stringify(ASM_NOP33),                               \
         __stringify(DO_SPEC_CTRL_ENTRY maybexen=1                       \
                     ibrs_val=SPEC_CTRL_IBRS),                           \
         X86_FEATURE_XEN_IBRS_SET,                                       \
@@ -267,22 +270,23 @@
      * This is logical merge of DO_OVERWRITE_RSB and DO_SPEC_CTRL_ENTRY
      * maybexen=1, but with conditionals rather than alternatives.
      */
-    movzbl STACK_CPUINFO_FIELD(bti_ist_info)(%r14), %eax
+    movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %eax

-    testb $BTI_IST_RSB, %al
+    test $SCF_ist_rsb, %al
     jz .L\@_skip_rsb

     DO_OVERWRITE_RSB tmp=rdx /* Clobbers %rcx/%rdx */

 .L\@_skip_rsb:

-    testb $BTI_IST_WRMSR, %al
+    test $SCF_ist_wrmsr, %al
     jz .L\@_skip_wrmsr

     xor %edx, %edx
     testb $3, UREGS_cs(%rsp)
-    setz %dl
-    and %dl, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%r14)
+    setnz %dl
+    not %edx
+    and %dl, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)

     /* Load Xen's intended value. */
     mov $MSR_SPEC_CTRL, %ecx
@@ -309,7 +313,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
 * Requires %rbx=stack_end
 * Clobbers %rax, %rcx, %rdx
 */
-    testb $BTI_IST_WRMSR, STACK_CPUINFO_FIELD(bti_ist_info)(%rbx)
+    testb $SCF_ist_wrmsr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx)
     jz .L\@_skip

     DO_SPEC_CTRL_EXIT_TO_XEN
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.8

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
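The effect of the change can be summarised in plain C. The sketch below is illustrative only: it mirrors the SCF_* flag names and the merged spec_ctrl_flags byte introduced by the patch, but the struct cpu_info stand-in and the helper functions (enter_idle, ist_entry) are simplified, hypothetical substitutes for Xen's real entry/exit code, which is written in assembly.

#include <stdint.h>
#include <stdio.h>

/* Same bit encoding the patch introduces for cpuinfo.spec_ctrl_flags. */
#define SCF_use_shadow (1u << 0)
#define SCF_ist_wrmsr  (1u << 1)
#define SCF_ist_rsb    (1u << 2)

/* Simplified stand-in for Xen's struct cpu_info. */
struct cpu_info {
    unsigned int shadow_spec_ctrl;
    uint8_t xen_spec_ctrl;
    uint8_t spec_ctrl_flags;   /* replaces use_shadow_spec_ctrl + bti_ist_info */
};

/* Hypothetical analogue of spec_ctrl_enter_idle(): start shadowing. */
static void enter_idle(struct cpu_info *ci)
{
    ci->shadow_spec_ctrl = 0;
    ci->spec_ctrl_flags |= SCF_use_shadow;      /* was: use_shadow_spec_ctrl = true */
}

/* Hypothetical analogue of the IST entry path: consult only the IST bits. */
static void ist_entry(struct cpu_info *ci)
{
    if ( ci->spec_ctrl_flags & SCF_ist_rsb )
        puts("overwrite RSB");

    if ( ci->spec_ctrl_flags & SCF_ist_wrmsr )
    {
        ci->spec_ctrl_flags &= ~SCF_use_shadow; /* was: use_shadow_spec_ctrl = false */
        puts("write Xen's MSR_SPEC_CTRL value");
    }
}

int main(void)
{
    struct cpu_info ci = { .spec_ctrl_flags = SCF_ist_wrmsr | SCF_ist_rsb };

    enter_idle(&ci);
    ist_entry(&ci);
    printf("flags after IST entry: %#x\n", ci.spec_ctrl_flags);

    return 0;
}

With all three pieces of state in one byte, the entry/exit paths test or mask individual bits instead of consulting two separate variables, which is what allows the single movzbl/testb sequences in spec_ctrl_asm.h above.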