[PATCH v4 1/4] x86/spec-ctrl: add logic to issue IBPB on exit to guest
In order to be able to defer the context switch IBPB to the last
possible point, add logic to the exit-to-guest paths to issue the
barrier there, including the "IBPB doesn't flush the RSB/RAS"
workaround. Since alternatives, for now at least, can't nest, emit JMP
to skip past both constructs where both are needed. This may be more
efficient anyway, as the sequence of NOPs is pretty long.

As with all other conditional blocks on exit-to-guest paths, no
Spectre-v1 protections are necessary as execution will imminently be
hitting a serialising event.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
I have to admit that I'm not really certain about the placement of the
IBPB wrt the MSR_SPEC_CTRL writes. For now I've simply used "opposite
of entry".

Since we're going to run out of SCF_* bits soon and since the new flag
is meaningful only in struct cpu_info's spec_ctrl_flags, we could
choose to widen that field to 16 bits right away and then use bit 8
(or higher) for the purpose here.
---
v4: Alter parts of the description. Re-word a comment. Rename flag and
    feature identifiers.
v3: New.
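To illustrate the "JMP to skip past both constructs" remark in the
description, here is a rough sketch of what the SVM exit path executes
after alternatives patching (simplified; labels as at the use site, and
the not-taken alternative is really NOP padding):

    /* X86_FEATURE_NEW_PRED_CTXT_HVM clear: */
        jmp  2f                 /* skips IBPB and RSB blocks alike */

    /* X86_FEATURE_NEW_PRED_CTXT_HVM set: */
        btrl $SCF_new_pred_ctxt_bit, CPUINFO_spec_ctrl_flags(%rsp)
        jnc  2f                 /* disp=(2f-1f) extends the skip past 1: */
        mov  $MSR_PRED_CMD, %ecx
        mov  $PRED_CMD_IBPB, %eax
        xor  %edx, %edx
        wrmsr
    1:  /* DO_OVERWRITE_RSB if X86_BUG_IBPB_NO_RET, NOPs otherwise */
    2: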
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -75,6 +75,12 @@ __UNLIKELY_END(nsvm_hap)
         .endm
         ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
 
+        ALTERNATIVE "jmp 2f", __stringify(DO_SPEC_CTRL_EXIT_IBPB disp=(2f-1f)), \
+                    X86_FEATURE_NEW_PRED_CTXT_HVM
+1:
+        ALTERNATIVE "", DO_OVERWRITE_RSB, X86_BUG_IBPB_NO_RET
+2:
+
         pop  %r15
         pop  %r14
         pop  %r13
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -86,7 +86,8 @@ UNLIKELY_END(realmode)
         jz .Lvmx_vmentry_restart
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        /* SPEC_CTRL_EXIT_TO_VMX   Req: %rsp=regs/cpuinfo              Clob:     */
+        /* SPEC_CTRL_EXIT_TO_VMX   Req: %rsp=regs/cpuinfo              Clob: acd */
+        ALTERNATIVE "", DO_SPEC_CTRL_EXIT_IBPB, X86_FEATURE_NEW_PRED_CTXT_HVM
         DO_SPEC_CTRL_COND_VERW
 
         mov  VCPU_hvm_guest_cr2(%rbx),%rax
--- a/xen/arch/x86/include/asm/cpufeatures.h
+++ b/xen/arch/x86/include/asm/cpufeatures.h
@@ -39,8 +39,10 @@ XEN_CPUFEATURE(XEN_LBR,           X86_SY
 XEN_CPUFEATURE(SC_VERW_IDLE,      X86_SYNTH(25)) /* VERW used by Xen for idle */
 XEN_CPUFEATURE(XEN_SHSTK,         X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
 XEN_CPUFEATURE(XEN_IBT,           X86_SYNTH(27)) /* Xen uses CET Indirect Branch Tracking */
-XEN_CPUFEATURE(IBPB_ENTRY_PV,     X86_SYNTH(28)) /* MSR_PRED_CMD used by Xen for PV */
-XEN_CPUFEATURE(IBPB_ENTRY_HVM,    X86_SYNTH(29)) /* MSR_PRED_CMD used by Xen for HVM */
+XEN_CPUFEATURE(IBPB_ENTRY_PV,     X86_SYNTH(28)) /* MSR_PRED_CMD used by Xen when entered from PV */
+XEN_CPUFEATURE(IBPB_ENTRY_HVM,    X86_SYNTH(29)) /* MSR_PRED_CMD used by Xen when entered from HVM */
+XEN_CPUFEATURE(NEW_PRED_CTXT_PV,  X86_SYNTH(30)) /* issue prediction barrier when exiting to PV */
+XEN_CPUFEATURE(NEW_PRED_CTXT_HVM, X86_SYNTH(31)) /* issue prediction barrier when exiting to HVM */
 
 /* Bug words follow the synthetic words. */
 #define X86_NR_BUG 1
--- a/xen/arch/x86/include/asm/current.h
+++ b/xen/arch/x86/include/asm/current.h
@@ -55,9 +55,13 @@ struct cpu_info {
 
     /* See asm/spec_ctrl_asm.h for usage. */
     unsigned int shadow_spec_ctrl;
+    /*
+     * spec_ctrl_flags is accessed as a 32-bit entity in certain cases. Place
+     * it accordingly.
+     */
+    uint8_t      spec_ctrl_flags;
     uint8_t      xen_spec_ctrl;
     uint8_t      last_spec_ctrl;
-    uint8_t      spec_ctrl_flags;
 
     /*
      * The following field controls copying of the L4 page table of 64-bit
--- a/xen/arch/x86/include/asm/spec_ctrl.h
+++ b/xen/arch/x86/include/asm/spec_ctrl.h
@@ -36,6 +36,8 @@
 #define SCF_verw       (1 << 3)
 #define SCF_ist_ibpb   (1 << 4)
 #define SCF_entry_ibpb (1 << 5)
+#define SCF_new_pred_ctxt_bit 6
+#define SCF_new_pred_ctxt (1 << SCF_new_pred_ctxt_bit)
 
 /*
  * The IST paths (NMI/#MC) can interrupt any arbitrary context. Some
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -117,6 +117,27 @@
 .L\@_done:
 .endm
 
+.macro DO_SPEC_CTRL_EXIT_IBPB disp=0
+/*
+ * Requires %rsp=regs
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * Conditionally issue IBPB if SCF_new_pred_ctxt is active.  The macro
+ * invocation may be followed by X86_BUG_IBPB_NO_RET workaround code.  The
+ * "disp" argument is to allow invocation sites to pass in the extra amount
+ * of code which needs skipping in case no action is necessary.
+ *
+ * The flag is a "one-shot" indicator, so it is being cleared at the same time.
+ */
+    btrl    $SCF_new_pred_ctxt_bit, CPUINFO_spec_ctrl_flags(%rsp)
+    jnc     .L\@_skip + (\disp)
+    mov     $MSR_PRED_CMD, %ecx
+    mov     $PRED_CMD_IBPB, %eax
+    xor     %edx, %edx
+    wrmsr
+.L\@_skip:
+.endm
+
 .macro DO_OVERWRITE_RSB tmp=rax
 /*
  * Requires nothing
@@ -272,6 +293,14 @@
 #define SPEC_CTRL_EXIT_TO_PV                                            \
     ALTERNATIVE "",                                                     \
         DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV;              \
+    ALTERNATIVE __stringify(jmp PASTE(.Lscexitpv_done, __LINE__)),      \
+        __stringify(DO_SPEC_CTRL_EXIT_IBPB                              \
+                    disp=(PASTE(.Lscexitpv_done, __LINE__) -            \
+                          PASTE(.Lscexitpv_rsb, __LINE__))),            \
+        X86_FEATURE_NEW_PRED_CTXT_PV;                                   \
+PASTE(.Lscexitpv_rsb, __LINE__):                                        \
+    ALTERNATIVE "", DO_OVERWRITE_RSB, X86_BUG_IBPB_NO_RET;              \
+PASTE(.Lscexitpv_done, __LINE__):                                       \
     DO_SPEC_CTRL_COND_VERW
 
 /*
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -8,6 +8,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 
 #include <irq_vectors.h>
@@ -156,7 +157,7 @@ ENTRY(compat_restore_all_guest)
         mov VCPUMSR_spec_ctrl_raw(%rax), %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: acd */
 
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -9,6 +9,7 @@
 #include <asm/asm_defns.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 
 #include <irq_vectors.h>
@@ -187,7 +188,7 @@ restore_all_guest:
         mov   %r15d, %eax
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+        SPEC_CTRL_EXIT_TO_PV    /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: acd */
 
         RESTORE_ALL
         testw $TRAP_syscall,4(%rsp)
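For completeness, a minimal sketch of the consumer side, which is not
part of this patch (the conversion of the context switch path is left
to a later patch in the series, so both the helper name and its call
site below are hypothetical):

    /* Hypothetical caller sketch -- not part of this patch. */
    static inline void request_exit_ibpb(void)
    {
        /*
         * Arm the one-shot request; DO_SPEC_CTRL_EXIT_IBPB clears it
         * again (btrl) when issuing the barrier on the way out.
         */
        get_cpu_info()->spec_ctrl_flags |= SCF_new_pred_ctxt;
    }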