[Xen-changelog] [xen stable-4.1] x86: defer processing events on the NMI exit path
commit d8f9d5b8b98de7d035c5064fe99a44f0a383842f
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Mar 12 16:30:04 2013 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Mar 12 16:30:04 2013 +0100

    x86: defer processing events on the NMI exit path

    Otherwise, we may end up in the scheduler, keeping NMIs masked for a
    possibly unbounded period of time (until whenever the next IRET gets
    executed). Enforce timely event processing by sending a self IPI.

    Of course it's open for discussion whether to always use the straight
    exit path from handle_ist_exception.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Keir Fraser <keir@xxxxxxx>
    master changeset: d463b005bbd6475ed930a302821efe239e1b2cf9
    master date: 2013-03-04 10:19:34 +0100
---
 xen/arch/x86/x86_32/entry.S        | 26 +++++++++++++++++++++-----
 xen/arch/x86/x86_64/compat/entry.S |  2 +-
 xen/arch/x86/x86_64/entry.S        | 27 ++++++++++++++++++++++-----
 3 files changed, 44 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index a7ce078..7941ee1 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -60,6 +60,7 @@
 #include <asm/apicdef.h>
 #include <asm/page.h>
 #include <public/xen.h>
+#include <irq_vectors.h>
 
         ALIGN
 restore_all_guest:
@@ -564,6 +565,8 @@ ENTRY(early_page_fault)
         jmp   restore_all_xen
         .popsection
 
+ENTRY(nmi)
+        pushl $TRAP_nmi<<16
 handle_nmi_mce:
 #ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
         # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
@@ -584,7 +587,24 @@ handle_nmi_mce:
          * cases we have put guest DS/ES on the guest stack frame, which will
          * be detected by SAVE_ALL(), or we have rolled back restore_guest.
          */
-        jmp   ret_from_intr
+        cmpb  $TRAP_nmi,UREGS_entry_vector(%esp)
+        jne   ret_from_intr
+        /* We want to get straight to the IRET on the NMI exit path. */
+        GET_CURRENT(%ebx)
+        movl  UREGS_eflags(%esp),%eax
+        movb  UREGS_cs(%esp),%al
+        testl $(3|X86_EFLAGS_VM),%eax
+        jz    restore_all_xen
+        /* Send an IPI to ourselves to cover for the lack of event checking. */
+        movl  VCPU_processor(%ebx),%eax
+        shll  $IRQSTAT_shift,%eax
+        cmpl  $0,irq_stat(%eax)
+        je    restore_all_guest
+        pushl $EVENT_CHECK_VECTOR
+        call  send_IPI_self
+        addl  $4,%esp
+        jmp   restore_all_guest
+
 .Lnmi_mce_xen:
         /* Check the outer (guest) context for %ds/%es state validity. */
         GET_CPUINFO_FIELD(CPUINFO_guest_cpu_user_regs,%ebx)
@@ -616,10 +636,6 @@ handle_nmi_mce:
         jmp   .Lnmi_mce_common
 #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
 
-ENTRY(nmi)
-        pushl $TRAP_nmi<<16
-        jmp   handle_nmi_mce
-
 ENTRY(machine_check)
         pushl $TRAP_machine_check<<16
         jmp   handle_nmi_mce
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index e7ac858..5c9b2e5 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -172,7 +172,7 @@ compat_bad_hypercall:
         jmp  compat_test_all_events
 
 /* %rbx: struct vcpu, interrupts disabled */
-compat_restore_all_guest:
+ENTRY(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
         RESTORE_ALL
         addq  $8,%rsp
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index bc75041..2b8c99a 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -11,6 +11,7 @@
 #include <asm/apicdef.h>
 #include <asm/page.h>
 #include <public/xen.h>
+#include <irq_vectors.h>
 
         ALIGN
 /* %rbx: struct vcpu */
@@ -622,6 +623,9 @@ ENTRY(early_page_fault)
         jmp   restore_all_xen
         .popsection
 
+ENTRY(nmi)
+        pushq $0
+        movl  $TRAP_nmi,4(%rsp)
 handle_ist_exception:
         SAVE_ALL
         testb $3,UREGS_cs(%rsp)
@@ -636,12 +640,25 @@ handle_ist_exception:
         movl  UREGS_entry_vector(%rsp),%eax
         leaq  exception_table(%rip),%rdx
         callq *(%rdx,%rax,8)
-        jmp   ret_from_intr
+        cmpb  $TRAP_nmi,UREGS_entry_vector(%rsp)
+        jne   ret_from_intr
 
-ENTRY(nmi)
-        pushq $0
-        movl  $TRAP_nmi,4(%rsp)
-        jmp   handle_ist_exception
+        /* We want to get straight to the IRET on the NMI exit path. */
+        testb $3,UREGS_cs(%rsp)
+        jz    restore_all_xen
+        GET_CURRENT(%rbx)
+        /* Send an IPI to ourselves to cover for the lack of event checking. */
+        movl  VCPU_processor(%rbx),%eax
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat(%rip),%rcx
+        cmpl  $0,(%rcx,%rax,1)
+        je    1f
+        movl  $EVENT_CHECK_VECTOR,%edi
+        call  send_IPI_self
+1:      movq  VCPU_domain(%rbx),%rax
+        cmpb  $0,DOMAIN_is_32bit_pv(%rax)
+        je    restore_all_guest
+        jmp   compat_restore_all_guest
 
 ENTRY(machine_check)
         pushq $0
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.1
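
For readers who want the change in plain C rather than entry.S: the logic
added to both the 32-bit and 64-bit exit paths boils down to the sketch
below. It is a minimal sketch under stated assumptions, not the
implementation -- nmi_exit() and the restore_all_*() calls are hypothetical
stand-ins for the assembly labels, while guest_mode(), current,
softirq_pending(), send_IPI_self(), is_pv_32on64_vcpu() and
EVENT_CHECK_VECTOR are the Xen symbols the assembly references.

    /*
     * Hedged C sketch of the NMI exit logic added above; the real code is
     * the entry.S assembly in this patch.  nmi_exit() and the
     * restore_all_*() helpers are hypothetical names for assembly labels.
     */
    static void nmi_exit(struct cpu_user_regs *regs)
    {
        struct vcpu *curr = current;

        /* NMI interrupted Xen itself: return with a plain IRET. */
        if ( !guest_mode(regs) )
            return restore_all_xen(regs);

        /*
         * Do not go through ret_from_intr: it can run softirqs and enter
         * the scheduler, deferring the IRET -- and hence keeping NMIs
         * masked -- for an unbounded time.  If event work is pending,
         * send a self IPI instead; it is delivered once the IRET below
         * unmasks NMIs and interrupts are enabled again, so the skipped
         * event check still happens promptly.
         */
        if ( softirq_pending(curr->processor) )
            send_IPI_self(EVENT_CHECK_VECTOR);

        if ( is_pv_32on64_vcpu(curr) )      /* 64-bit Xen, 32-bit PV guest */
            return compat_restore_all_guest(regs);
        return restore_all_guest(regs);     /* straight to IRET */
    }

The last branch is also why the compat file's change is needed at all:
compat_restore_all_guest becomes ENTRY(compat_restore_all_guest) so the
64-bit NMI exit path can jump to it from another file.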