[PATCH 6/8] x86/entry: Track the IST-ness of an entry for the exit paths
Use %r12 to hold an ist_exit boolean.  This register is zero elsewhere in the
entry/exit asm, so it only needs setting in the IST path.

As this is subtle and fragile, add check_ist_exit() to be used in debug
builds to cross-check that the ist_exit boolean matches the entry vector.

Write check_ist_exit() in C, because it's debug-only and the logic is more
complicated than I care to maintain in asm.
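
(Aside for reviewers, not part of the commit message: the vector-to-IST
mapping the checker relies on can be exercised standalone.  A minimal
sketch, assuming the architectural vector numbers rather than Xen's
headers:)

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural exception vectors; values mirror Xen's X86_EXC_* names. */
#define X86_EXC_DB   1
#define X86_EXC_NMI  2
#define X86_EXC_DF   8
#define X86_EXC_MC  18
#define X86_EXC_NUM 32

static bool vector_is_ist(uint8_t ev)
{
    const unsigned int ist_mask =
        (1U << X86_EXC_NMI) | (1U << X86_EXC_DB) |
        (1U << X86_EXC_DF) | (1U << X86_EXC_MC);

    return (ev < X86_EXC_NUM) && ((1U << ev) & ist_mask);
}

int main(void)
{
    assert(vector_is_ist(X86_EXC_NMI));  /* NMI enters via IST */
    assert(!vector_is_ist(14));          /* #PF does not */
    printf("IST vector checks pass\n");
    return 0;
}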
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
---
 xen/arch/x86/traps.c               | 13 +++++++++++++
 xen/arch/x86/x86_64/compat/entry.S |  9 ++++++++-
 xen/arch/x86/x86_64/entry.S        | 23 ++++++++++++++++++++---
 3 files changed, 41 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index dead728ce329..0a005f088bca 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2259,6 +2259,19 @@ void asm_domain_crash_synchronous(unsigned long addr)
         do_softirq();
 }
 
+#ifdef CONFIG_DEBUG
+void check_ist_exit(const struct cpu_user_regs *regs, bool ist_exit)
+{
+    const unsigned int ist_mask =
+        (1U << X86_EXC_NMI) | (1U << X86_EXC_DB) |
+        (1U << X86_EXC_DF) | (1U << X86_EXC_MC);
+    uint8_t ev = regs->entry_vector;
+    bool is_ist = (ev < X86_EXC_NUM) && ((1U << ev) & ist_mask);
+
+    ASSERT(is_ist == ist_exit);
+}
+#endif
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index bd5abd8040bd..7504bfb4f326 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -117,8 +117,15 @@ compat_process_trap:
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-/* %rbx: struct vcpu, interrupts disabled */
+/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */
 ENTRY(compat_restore_all_guest)
+
+#ifdef CONFIG_DEBUG
+        mov   %rsp, %rdi
+        mov   %r12, %rsi
+        call  check_ist_exit
+#endif
+
         ASSERT_INTERRUPTS_DISABLED
         mov   $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11d
         and   UREGS_eflags(%rsp),%r11d
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 525877e97330..da084a7e8e54 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -142,10 +142,16 @@ process_trap:
         .section .text.entry, "ax", @progbits
 
-/* %rbx: struct vcpu, interrupts disabled */
+/* %rbx: struct vcpu, %r12: ist_exit, interrupts disabled */
 restore_all_guest:
-        ASSERT_INTERRUPTS_DISABLED
+#ifdef CONFIG_DEBUG
+        mov   %rsp, %rdi
+        mov   %r12, %rsi
+        call  check_ist_exit
+#endif
+
+        ASSERT_INTERRUPTS_DISABLED
 
         /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
         mov   VCPU_arch_msrs(%rbx), %rdx
         mov   VCPUMSR_spec_ctrl_raw(%rdx), %r15d
@@ -659,8 +665,15 @@ ENTRY(early_page_fault)
         .section .text.entry, "ax", @progbits
         ALIGN
-/* No special register assumptions. */
+/* %r12=ist_exit */
 restore_all_xen:
+
+#ifdef CONFIG_DEBUG
+        mov   %rsp, %rdi
+        mov   %r12, %rsi
+        call  check_ist_exit
+#endif
+
         /*
          * Check whether we need to switch to the per-CPU page tables, in
          * case we return to late PV exit code (from an NMI or #MC).
@@ -1087,6 +1100,10 @@ handle_ist_exception:
 .L_ist_dispatch_done:
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
+
+        /* This is an IST exit */
+        mov   $1, %r12
+
         cmpb  $X86_EXC_NMI, UREGS_entry_vector(%rsp)
         jne   ret_from_intr
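
(Aside for reviewers, not part of the patch: a single "mov $1, %r12" here
suffices because of the invariant named in the commit message, i.e. %r12 is
zero on every other path to the exit labels.  A toy C model of that
discipline, all names hypothetical:)

#include <assert.h>
#include <stdbool.h>

static bool r12_ist_exit; /* models %r12: zero unless set on an IST path */

/* Models check_ist_exit(): the flag must match how we entered. */
static void exit_path(bool entered_via_ist)
{
    assert(r12_ist_exit == entered_via_ist);
}

static void ist_entry(void)
{
    r12_ist_exit = true;  /* the single "mov $1, %r12" */
    exit_path(true);
    r12_ist_exit = false; /* registers are restored on the way out */
}

static void ordinary_entry(void)
{
    exit_path(false);     /* %r12 untouched, still zero */
}

int main(void)
{
    ordinary_entry();
    ist_entry();
    ordinary_entry();
    return 0;
}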
--
2.30.2