[xen master] xen/x86: add nmi continuation framework
commit 665c940d24b53e52a32a38571bc7a58a165409d9
Author:     Juergen Gross <jgross@xxxxxxxx>
AuthorDate: Wed Nov 18 12:38:29 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Nov 18 12:38:29 2020 +0100

    xen/x86: add nmi continuation framework

    Actions in NMI context are rather limited as e.g. locking is rather
    fragile.

    Add a framework to continue processing in normal interrupt context
    after leaving NMI processing.

    This is done by a high priority interrupt vector triggered via a self
    IPI from NMI context, which will then call the continuation function
    specified during NMI handling.

    Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/apic.c       | 13 ++++++++++---
 xen/arch/x86/smp.c        |  1 +
 xen/arch/x86/traps.c      | 21 +++++++++++++++++++++
 xen/include/asm-x86/nmi.h | 11 ++++++++++-
 4 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c
index 60627fd6e6..7497ddb5da 100644
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -40,6 +40,7 @@
 #include <irq_vectors.h>
 #include <xen/kexec.h>
 #include <asm/guest.h>
+#include <asm/nmi.h>
 #include <asm/time.h>

 static bool __read_mostly tdt_enabled;
@@ -1376,16 +1377,22 @@ void spurious_interrupt(struct cpu_user_regs *regs)
 {
     /*
      * Check if this is a vectored interrupt (most likely, as this is probably
-     * a request to dump local CPU state). Vectored interrupts are ACKed;
-     * spurious interrupts are not.
+     * a request to dump local CPU state or to continue NMI handling).
+     * Vectored interrupts are ACKed; spurious interrupts are not.
      */
     if (apic_isr_read(SPURIOUS_APIC_VECTOR)) {
+        bool is_spurious;
+
         ack_APIC_irq();
+        is_spurious = !nmi_check_continuation();
         if (this_cpu(state_dump_pending)) {
             this_cpu(state_dump_pending) = false;
             dump_execstate(regs);
-            return;
+            is_spurious = false;
         }
+
+        if ( !is_spurious )
+            return;
     }

     /* see sw-dev-man vol 3, chapter 7.4.13.5 */
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index 14aa355a6b..eef0f9c6cb 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -163,6 +163,7 @@ void send_IPI_self(int vector)

 void send_IPI_self_legacy(uint8_t vector)
 {
+    /* NMI continuation handling relies on using a shorthand here. */
     send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
 }

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c27dd4cd43..5cbaa49031 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -79,6 +79,7 @@
 #include <public/hvm/params.h>
 #include <asm/cpuid.h>
 #include <xsm/xsm.h>
+#include <asm/mach-default/irq_vectors.h>
 #include <asm/pv/traps.h>
 #include <asm/pv/mm.h>

@@ -1800,6 +1801,26 @@ void unset_nmi_callback(void)
     nmi_callback = dummy_nmi_callback;
 }

+bool nmi_check_continuation(void)
+{
+    bool ret = false;
+
+    return ret;
+}
+
+void trigger_nmi_continuation(void)
+{
+    /*
+     * Issue a self-IPI. Handling is done in spurious_interrupt().
+     * NMI could have happened in IPI sequence, so wait for ICR being idle
+     * again before leaving NMI handler.
+     * This relies on self-IPI using a simple shorthand, thus avoiding any
+     * use of locking or percpu cpumasks.
+     */
+    send_IPI_self(SPURIOUS_APIC_VECTOR);
+    apic_wait_icr_idle();
+}
+
 void do_device_not_available(struct cpu_user_regs *regs)
 {
 #ifdef CONFIG_PV
diff --git a/xen/include/asm-x86/nmi.h b/xen/include/asm-x86/nmi.h
index a288f02a50..9a5da14162 100644
--- a/xen/include/asm-x86/nmi.h
+++ b/xen/include/asm-x86/nmi.h
@@ -33,5 +33,14 @@ nmi_callback_t *set_nmi_callback(nmi_callback_t *callback);
 void unset_nmi_callback(void);

 DECLARE_PER_CPU(unsigned int, nmi_count);
-
+
+/**
+ * trigger_nmi_continuation
+ *
+ * Schedule continuation to be started in interrupt context after NMI handling.
+ */
+void trigger_nmi_continuation(void);
+
+/* Check for NMI continuation pending. */
+bool nmi_check_continuation(void);
 #endif /* ASM_NMI_H */
--
generated by git-patchbot for /home/xen/git/xen.git#master
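
[Editor's note] In this commit nmi_check_continuation() is deliberately a stub
returning false; real consumers are expected from follow-up patches. A minimal
sketch of the intended usage pattern, assuming this tree's nmi_callback_t
signature; the per-CPU flag and callback name below are hypothetical
illustrations, not part of this patch:

    #include <xen/percpu.h>
    #include <asm/nmi.h>

    /* Hypothetical per-CPU flag recording that a continuation is pending. */
    static DEFINE_PER_CPU(bool, nmi_cont_pending);

    /* Hypothetical NMI callback: locking is fragile here, so defer the work. */
    static int hypothetical_nmi_callback(const struct cpu_user_regs *regs,
                                         int cpu)
    {
        this_cpu(nmi_cont_pending) = true;
        trigger_nmi_continuation(); /* self-IPI; resumes in spurious_interrupt() */
        return 1;                   /* NMI handled */
    }

    /*
     * What a filled-in nmi_check_continuation() might look like: it runs from
     * spurious_interrupt() in ordinary interrupt context, where locking and
     * other deferred work are safe again.
     */
    bool nmi_check_continuation(void)
    {
        bool ret = false;

        if ( this_cpu(nmi_cont_pending) )
        {
            this_cpu(nmi_cont_pending) = false;
            /* ... perform the work that was unsafe in NMI context ... */
            ret = true;
        }

        return ret;
    }

A consumer would register the callback via
set_nmi_callback(hypothetical_nmi_callback); the shorthand self-IPI (see the
smp.c comment above) is what lets this run from NMI context without touching
locks or per-CPU cpumasks.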