[Xen-devel] [PATCH v6.5 21/26] x86/entry: Use MSR_SPEC_CTRL at each entry/exit point
Set or clear IBRS in Xen context, and appropriate guest values in guest
context. See the documentation in asm-x86/spec_ctrl_asm.h for details.
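As a purely illustrative C sketch (the model_*() names, the local struct and the
stub wrmsrl() here are not part of the patch; the real entry/exit paths below are
necessarily written in assembly), the per-context MSR handling and the shadow
mechanism documented in spec_ctrl_asm.h behave roughly as follows:

    #include <stdbool.h>

    /* Stand-ins for Xen's MSR accessor and the architectural MSR index. */
    #define MSR_SPEC_CTRL 0x00000048
    static void wrmsrl(unsigned int msr, unsigned long val)
    { (void)msr; (void)val; }

    /* Mirrors the two fields this patch adds to struct cpu_info. */
    struct cpu_info_model {
        unsigned int shadow_spec_ctrl;
        bool use_shadow_spec_ctrl;
    };

    /* Exit to guest: set up shadowing *before* loading the guest's value. */
    static void model_exit_to_guest(struct cpu_info_model *info,
                                    unsigned int guest_val)
    {
        info->shadow_spec_ctrl = guest_val;  /* stash the guest value        */
        info->use_shadow_spec_ctrl = true;   /* enable shadowing             */
        wrmsrl(MSR_SPEC_CTRL, guest_val);    /* load the guest value         */
    }

    /* Entry from guest: drop shadowing *before* loading Xen's IBRS setting. */
    static void model_entry_from_guest(struct cpu_info_model *info,
                                       unsigned int xen_val)
    {
        info->use_shadow_spec_ctrl = false;
        wrmsrl(MSR_SPEC_CTRL, xen_val);      /* IBRS set/clear, per hardware */
    }

    /* Exit back to Xen, e.g. from an NMI/MCE which hit one of the above. */
    static void model_exit_to_xen(const struct cpu_info_model *info)
    {
        if ( info->use_shadow_spec_ctrl )    /* interrupted exit-to-guest?   */
            wrmsrl(MSR_SPEC_CTRL, info->shadow_spec_ctrl);
    }

The ordering of the stores relative to the MSR write is what allows an
interrupting NMI/MCE to restore the correct value, and is exactly what the asm
fragments below enforce.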
Two semi-unrelated bugfixes are included.  First, various asm_defns.h macros
have a hidden dependency on PAGE_SIZE, which results in an assembler error if
they are used in a .macro definition.  Second, _ASM_MK_NOP() needs a separator
at the end of its expansion, rather than relying on its calling context for
separation; see the sketch below.
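The separator issue shows up with the new multi-nop definitions in nops.h
(ASM_NOP22 and friends), which concatenate several ASM_NOPn expansions on one
line.  A sketch of the expansion, with the byte lists abbreviated:

    /* Old assembly-side definition, relying on the caller for separation:
     *     #define _ASM_MK_NOP(x) .byte x
     * ASM_NOP22, i.e. ASM_NOP8 ASM_NOP8 ASM_NOP6, then expands to
     *     .byte 0x66,... .byte 0x66,... .byte 0x66,...
     * on a single line, which is not a well-formed assembler statement.
     */

    /* New definition: the trailing ';' terminates each expansion, giving
     *     .byte 0x66,...; .byte 0x66,...; .byte 0x66,...;
     * which assembles as three separate directives.
     */
    #define _ASM_MK_NOP(x) .byte x;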
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v3:
* Basically rewritten from scratch
v4:
* Use STACK* macros
* Drop semicolons
* Fix several offset bugs
* Introduce init_shadow_spec_ctrl_state() rather than opencoding it for the
BSP and APs
* Rebase over AMD changes
---
xen/arch/x86/hvm/svm/entry.S | 8 +-
xen/arch/x86/hvm/vmx/entry.S | 11 ++
xen/arch/x86/setup.c | 1 +
xen/arch/x86/smpboot.c | 2 +
xen/arch/x86/x86_64/asm-offsets.c | 6 +
xen/arch/x86/x86_64/compat/entry.S | 12 ++
xen/arch/x86/x86_64/entry.S | 33 ++++++
xen/include/asm-x86/asm_defns.h | 3 +
xen/include/asm-x86/current.h | 6 +
xen/include/asm-x86/nops.h | 8 +-
xen/include/asm-x86/spec_ctrl.h | 9 ++
xen/include/asm-x86/spec_ctrl_asm.h | 230 ++++++++++++++++++++++++++++++++++++
12 files changed, 327 insertions(+), 2 deletions(-)
create mode 100644 xen/include/asm-x86/spec_ctrl_asm.h
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index df86da0..fb1048b 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -79,6 +79,9 @@ UNLIKELY_END(svm_trace)
or $X86_EFLAGS_MBS,%rax
mov %rax,VMCB_rflags(%rcx)
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+ SPEC_CTRL_EXIT_TO_GUEST /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+
pop %r15
pop %r14
pop %r13
@@ -101,8 +104,11 @@ UNLIKELY_END(svm_trace)
SAVE_ALL
GET_CURRENT(bx)
- mov VCPU_svm_vmcb(%rbx),%rcx
+ SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+ mov VCPU_svm_vmcb(%rbx),%rcx
movb $0,VCPU_svm_vmcb_in_sync(%rbx)
mov VMCB_rax(%rcx),%rax
mov %rax,UREGS_rax(%rsp)
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index b2f98be..21e959f 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -38,6 +38,9 @@ ENTRY(vmx_asm_vmexit_handler)
movb $1,VCPU_vmx_launched(%rbx)
mov %rax,VCPU_hvm_guest_cr2(%rbx)
+ SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
mov %rsp,%rdi
call vmx_vmexit_handler
@@ -68,6 +71,10 @@ UNLIKELY_END(realmode)
call vmx_vmenter_helper
test %al, %al
jz .Lvmx_vmentry_restart
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+ SPEC_CTRL_EXIT_TO_GUEST /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+
mov VCPU_hvm_guest_cr2(%rbx),%rax
pop %r15
@@ -99,6 +106,10 @@ UNLIKELY_END(realmode)
.Lvmx_vmentry_fail:
sti
SAVE_ALL
+
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
call vmx_vmentry_failure
BUG /* vmx_vmentry_failure() shouldn't return. */
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 470427b..b2aa281 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -668,6 +668,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
set_processor_id(0);
set_current(INVALID_VCPU); /* debug sanity. */
idle_vcpu[0] = current;
+ init_shadow_spec_ctrl_state();
percpu_init_areas();
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 7b97ff8..a695d12 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -40,6 +40,7 @@
#include <asm/flushtlb.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
+#include <asm/spec_ctrl.h>
#include <asm/time.h>
#include <asm/tboot.h>
#include <mach_apic.h>
@@ -308,6 +309,7 @@ void start_secondary(void *unused)
set_current(idle_vcpu[cpu]);
this_cpu(curr_vcpu) = idle_vcpu[cpu];
rdmsrl(MSR_EFER, this_cpu(efer));
+ init_shadow_spec_ctrl_state();
/*
* Just as during early bootstrap, it is convenient here to disable
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index e136af6..7d36185 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -88,6 +88,7 @@ void __dummy__(void)
OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
OFFSET(VCPU_iopl, struct vcpu, arch.pv_vcpu.iopl);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
+ OFFSET(VCPU_arch_msr, struct vcpu, arch.msr);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
@@ -137,6 +138,8 @@ void __dummy__(void)
OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
+ OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
+ OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl);
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
BLANK();
@@ -152,6 +155,9 @@ void __dummy__(void)
OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
BLANK();
+ OFFSET(VCPUMSR_spec_ctrl_host, struct msr_vcpu_policy, spec_ctrl.host);
+ BLANK();
+
#ifdef CONFIG_PERF_COUNTERS
DEFINE(ASM_PERFC_exceptions, PERFC_exceptions);
BLANK();
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3fea54e..422c25d 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -18,6 +18,10 @@ ENTRY(entry_int82)
pushq $0
movl $HYPERCALL_VECTOR, 4(%rsp)
SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
+
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
CR4_PV32_RESTORE
GET_CURRENT(bx)
@@ -142,6 +146,10 @@ ENTRY(compat_restore_all_guest)
.popsection
or $X86_EFLAGS_IF,%r11
mov %r11d,UREGS_eflags(%rsp)
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+ SPEC_CTRL_EXIT_TO_GUEST /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+
RESTORE_ALL adj=8 compat=1
.Lft0: iretq
_ASM_PRE_EXTABLE(.Lft0, handle_exception)
@@ -199,6 +207,10 @@ ENTRY(cstar_enter)
pushq $0
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
+
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
GET_CURRENT(bx)
movq VCPU_domain(%rbx),%rcx
cmpb $0,DOMAIN_is_32bit_pv(%rcx)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index b49d62b..ba7c5e9 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -37,6 +37,10 @@ ENTRY(switch_to_kernel)
/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
ASSERT_INTERRUPTS_DISABLED
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+ SPEC_CTRL_EXIT_TO_GUEST /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
jz iret_exit_to_guest
@@ -71,6 +75,8 @@ iret_exit_to_guest:
ALIGN
/* No special register assumptions. */
restore_all_xen:
+ /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+ SPEC_CTRL_EXIT_TO_XEN /* Req: nothing Clob: acd */
RESTORE_ALL adj=8
iretq
@@ -100,6 +106,10 @@ ENTRY(lstar_enter)
pushq $0
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
+
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
GET_CURRENT(bx)
testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jz switch_to_kernel
@@ -192,6 +202,10 @@ GLOBAL(sysenter_eflags_saved)
pushq $0
movl $TRAP_syscall, 4(%rsp)
SAVE_ALL
+
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
GET_CURRENT(bx)
cmpb $0,VCPU_sysenter_disables_events(%rbx)
movq VCPU_sysenter_addr(%rbx),%rax
@@ -228,6 +242,9 @@ ENTRY(int80_direct_trap)
movl $0x80, 4(%rsp)
SAVE_ALL
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
cmpb $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
movl $0x80,%edi
@@ -391,6 +408,10 @@ ENTRY(dom_crash_sync_extable)
ENTRY(common_interrupt)
SAVE_ALL CLAC
+
+ SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
CR4_PV32_RESTORE
movq %rsp,%rdi
callq do_IRQ
@@ -411,6 +432,10 @@ ENTRY(page_fault)
/* No special register assumptions. */
GLOBAL(handle_exception)
SAVE_ALL CLAC
+
+ SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
handle_exception_saved:
GET_CURRENT(bx)
testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
@@ -586,6 +611,10 @@ ENTRY(double_fault)
movl $TRAP_double_fault,4(%rsp)
/* Set AC to reduce chance of further SMAP faults */
SAVE_ALL STAC
+
+ SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
movq %rsp,%rdi
call do_double_fault
BUG /* do_double_fault() shouldn't return. */
@@ -604,6 +633,10 @@ ENTRY(nmi)
movl $TRAP_nmi,4(%rsp)
handle_ist_exception:
SAVE_ALL CLAC
+
+ SPEC_CTRL_ENTRY_FROM_INTR /* Req: %rsp=regs Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
CR4_PV32_RESTORE
testb $3,UREGS_cs(%rsp)
jz 1f
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index 40b0250..bc10167 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -7,6 +7,7 @@
#include <asm/asm-offsets.h>
#endif
#include <asm/bug.h>
+#include <asm/page.h>
#include <asm/processor.h>
#include <asm/percpu.h>
#include <xen/stringify.h>
@@ -344,4 +345,6 @@ static always_inline void stac(void)
#define REX64_PREFIX "rex64/"
#endif
+#include <asm/spec_ctrl_asm.h>
+
#endif /* __X86_ASM_DEFNS_H__ */
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 8984992..749d7aa 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -41,6 +41,12 @@ struct cpu_info {
struct vcpu *current_vcpu;
unsigned long per_cpu_offset;
unsigned long cr4;
+
+ /* See asm-x86/spec_ctrl_asm.h for usage. */
+ unsigned int shadow_spec_ctrl;
+ bool use_shadow_spec_ctrl;
+
+ unsigned long __pad;
/* get_stack_bottom() must be 16-byte aligned */
};
diff --git a/xen/include/asm-x86/nops.h b/xen/include/asm-x86/nops.h
index 1a46b97..9e8f530 100644
--- a/xen/include/asm-x86/nops.h
+++ b/xen/include/asm-x86/nops.h
@@ -50,7 +50,7 @@
#define P6_NOP9 0x66,0x0f,0x1f,0x84,0x00,0,0,0,0
#ifdef __ASSEMBLY__
-#define _ASM_MK_NOP(x) .byte x
+#define _ASM_MK_NOP(x) .byte x;
#else
#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
#endif
@@ -65,6 +65,12 @@
#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8)
#define ASM_NOP9 _ASM_MK_NOP(P6_NOP9)
+#define ASM_NOP22 ASM_NOP8 ASM_NOP8 ASM_NOP6
+#define ASM_NOP26 ASM_NOP8 ASM_NOP8 ASM_NOP8 ASM_NOP2
+#define ASM_NOP32 ASM_NOP8 ASM_NOP8 ASM_NOP8 ASM_NOP8
+#define ASM_NOP33 ASM_NOP8 ASM_NOP8 ASM_NOP8 ASM_NOP7 ASM_NOP2
+#define ASM_NOP39 ASM_NOP8 ASM_NOP8 ASM_NOP8 ASM_NOP8 ASM_NOP7
+
#define ASM_NOP_MAX 9
#endif /* __X86_ASM_NOPS_H__ */
diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
index d0b44f6..29a31a9 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -20,8 +20,17 @@
#ifndef __X86_SPEC_CTRL_H__
#define __X86_SPEC_CTRL_H__
+#include <asm/current.h>
+
void init_speculation_mitigations(void);
+static inline void init_shadow_spec_ctrl_state(void)
+{
+ struct cpu_info *info = get_cpu_info();
+
+ info->shadow_spec_ctrl = info->use_shadow_spec_ctrl = 0;
+}
+
#endif /* !__X86_SPEC_CTRL_H__ */
/*
diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
new file mode 100644
index 0000000..13e058c
--- /dev/null
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -0,0 +1,230 @@
+/******************************************************************************
+ * include/asm-x86/spec_ctrl_asm.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2017 Citrix Systems Ltd.
+ */
+
+#ifndef __X86_SPEC_CTRL_ASM_H__
+#define __X86_SPEC_CTRL_ASM_H__
+
+#ifdef __ASSEMBLY__
+#include <asm/msr.h>
+
+/*
+ * Saving and restoring MSR_SPEC_CTRL state is a little tricky.
+ *
+ * We want the guest's choice of SPEC_CTRL while in guest context, and IBRS
+ * (set or clear, depending on the hardware) while running in Xen context.
+ * Therefore, a simplistic algorithm is:
+ *
+ * - Set/clear IBRS on entry to Xen
+ * - Set the guests' choice on exit to guest
+ * - Leave SPEC_CTRL unchanged on exit to Xen
+ *
+ * There are two complicating factors:
+ * 1) HVM guests can have direct access to the MSR, so it can change
+ * behind Xen's back.
+ * 2) An NMI or MCE can interrupt at any point, including early in the entry
+ * path, or late in the exit path after restoring the guest value. This
+ * will corrupt the guest value.
+ *
+ * Factor 1 is dealt with by relying on NMIs/MCEs being blocked immediately
+ * after VMEXIT. The VMEXIT-specific code reads MSR_SPEC_CTRL and updates
+ * current before loading Xen's MSR_SPEC_CTRL setting.
+ *
+ * Factor 2 is harder. We maintain a shadow_spec_ctrl value, and
+ * use_shadow_spec_ctrl boolean per cpu. The synchronous use is:
+ *
+ * 1) Store guest value in shadow_spec_ctrl
+ * 2) Set use_shadow_spec_ctrl boolean
+ * 3) Load guest value into MSR_SPEC_CTRL
+ * 4) Exit to guest
+ * 5) Entry from guest
+ * 6) Clear use_shadow_spec_ctrl boolean
+ *
+ * The asynchronous use for interrupts/exceptions is:
+ * - Set/clear IBRS on entry to Xen
+ * - On exit to Xen, check use_shadow_spec_ctrl
+ * - If set, load shadow_spec_ctrl
+ *
+ * Therefore, an interrupt/exception which hits the synchronous path between
+ * steps 2 and 6 will restore the shadow value rather than leaving Xen's value
+ * loaded and corrupting the value used in guest context.
+ *
+ * The following ASM fragments implement this algorithm. See their local
+ * comments for further details.
+ * - SPEC_CTRL_ENTRY_FROM_VMEXIT
+ * - SPEC_CTRL_ENTRY_FROM_PV
+ * - SPEC_CTRL_ENTRY_FROM_INTR
+ * - SPEC_CTRL_EXIT_TO_XEN
+ * - SPEC_CTRL_EXIT_TO_GUEST
+ */
+
+.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT ibrs_val:req
+/*
+ * Requires %rbx=current, %rsp=regs/cpuinfo
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * The common case is that a guest has direct access to MSR_SPEC_CTRL, at
+ * which point we need to save the guest value before setting IBRS for Xen.
+ * Unilaterally saving the guest value is shorter and faster than checking.
+ */
+ mov $MSR_SPEC_CTRL, %ecx
+ rdmsr
+
+ /* Stash the value from hardware. */
+ mov VCPU_arch_msr(%rbx), %rdx
+ mov %al, VCPUMSR_spec_ctrl_host(%rdx)
+ xor %edx, %edx
+
+ /* Clear SPEC_CTRL shadowing *before* loading Xen's value. */
+ movb %dl, CPUINFO_use_shadow_spec_ctrl(%rsp)
+
+ /* Load Xen's intended value. */
+ mov $\ibrs_val, %eax
+ wrmsr
+.endm
+
+.macro DO_SPEC_CTRL_ENTRY maybexen:req ibrs_val:req
+/*
+ * Requires %rsp=regs (also cpuinfo if !maybexen)
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * PV guests can't update MSR_SPEC_CTRL behind Xen's back, so no need to read
+ * it back. Entries from guest context need to clear SPEC_CTRL shadowing,
+ * while entries from Xen must leave shadowing in its current state.
+ */
+ mov $MSR_SPEC_CTRL, %ecx
+
+ .if \maybexen
+ cmpl $__HYPERVISOR_CS, UREGS_cs(%rsp)
+ je .Lentry_from_xen\@
+ .endif
+
+ /*
+ * Clear SPEC_CTRL shadowing *before* loading Xen's value. If entering
+ * from a possibly-Xen context, %rsp doesn't necessarily alias the cpuinfo
+ * block so calculate the position directly.
+ */
+ .if \maybexen
+ GET_STACK_END(dx)
+ movb $0, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%rdx)
+ .else
+ movb $0, CPUINFO_use_shadow_spec_ctrl(%rsp)
+ .endif
+
+.Lentry_from_xen\@:
+ /* Load Xen's intended value. */
+ mov $\ibrs_val, %eax
+ xor %edx, %edx
+ wrmsr
+.endm
+
+.macro DO_SPEC_CTRL_EXIT_TO_XEN
+/*
+ * Requires nothing
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * When returning to Xen context, look to see whether SPEC_CTRL shadowing is
+ * in effect, and reload the shadow value. This covers race conditions which
+ * exist with an NMI/MCE/etc hitting late in the return-to-guest path.
+ */
+ GET_STACK_END(dx)
+ cmpb $0, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%rdx)
+ je .Lend_\@
+
+ mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%rdx), %eax
+ mov $MSR_SPEC_CTRL, %ecx
+ xor %edx, %edx
+ wrmsr
+
+.Lend_\@:
+.endm
+
+.macro DO_SPEC_CTRL_EXIT_TO_GUEST
+/*
+ * Requires %rbx=current, %rsp=regs/cpuinfo
+ * Clobbers %rax, %rcx, %rdx
+ *
+ * When returning to guest context, set up SPEC_CTRL shadowing and load the
+ * guest value.
+ */
+ mov VCPU_arch_msr(%rbx), %rdx
+ mov VCPUMSR_spec_ctrl_host(%rdx), %eax
+
+ /* Set up shadow value *before* enabling shadowing. */
+ mov %eax, CPUINFO_shadow_spec_ctrl(%rsp)
+
+ /* Set SPEC_CTRL shadowing *before* loading the guest value. */
+ movb $1, CPUINFO_use_shadow_spec_ctrl(%rsp)
+
+ mov $MSR_SPEC_CTRL, %ecx
+ xor %edx, %edx
+ wrmsr
+.endm
+
+/* Use after a VMEXIT from an HVM guest. */
+#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
+ ALTERNATIVE_2 __stringify(ASM_NOP32), \
+ __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
+ ibrs_val=SPEC_CTRL_IBRS), \
+ X86_FEATURE_XEN_IBRS_SET, \
+ __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
+ ibrs_val=0), \
+ X86_FEATURE_XEN_IBRS_CLEAR
+
+/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
+#define SPEC_CTRL_ENTRY_FROM_PV \
+ ALTERNATIVE_2 __stringify(ASM_NOP22), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 \
+ ibrs_val=SPEC_CTRL_IBRS), \
+ X86_FEATURE_XEN_IBRS_SET, \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 ibrs_val=0), \
+ X86_FEATURE_XEN_IBRS_CLEAR
+
+/* Use in interrupt/exception context. May interrupt Xen or PV context. */
+#define SPEC_CTRL_ENTRY_FROM_INTR \
+ ALTERNATIVE_2 __stringify(ASM_NOP39), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 \
+ ibrs_val=SPEC_CTRL_IBRS), \
+ X86_FEATURE_XEN_IBRS_SET, \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 ibrs_val=0), \
+ X86_FEATURE_XEN_IBRS_CLEAR
+
+/* Use when exiting to Xen context. */
+#define SPEC_CTRL_EXIT_TO_XEN \
+ ALTERNATIVE_2 __stringify(ASM_NOP26), \
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_SET, \
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_CLEAR
+
+/* Use when exiting to guest context. */
+#define SPEC_CTRL_EXIT_TO_GUEST \
+ ALTERNATIVE_2 __stringify(ASM_NOP33), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_SET, \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_CLEAR
+
+#endif /* __ASSEMBLY__ */
+#endif /* !__X86_SPEC_CTRL_ASM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
--
2.1.4