
[Xen-changelog] [xen-unstable] x86, hvm: Move return-to-guest timer and interrupt cranking logic



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1224758459 -3600
# Node ID 4941c5a1459839c9923f7dafe6fe7705f90ca436
# Parent  50fc79012db7b393e67cf88fda026f67990623cf
x86, hvm: Move return-to-guest timer and interrupt cranking logic
outside of IRQ-safe context. This allows us to safely take
non-IRQ-safe spinlocks.

The drawback is that {vmx,svm}_intr_assist() now races with new event
notifications delivered by IRQ or IPI. We close this race by having
vcpu_kick() send a dummy softirq -- this gets picked up in IRQ-safe
context and causes a retry of *_intr_assist(). We avoid delivering the
softirq where possible: in particular, it is not needed when we are
already running in the non-IRQ context of the VCPU to be kicked.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/domain.c         |   48 ++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/entry.S  |    3 +-
 xen/arch/x86/hvm/vmx/entry.S  |    6 +++--
 xen/include/asm-x86/event.h   |   32 +---------------------------
 xen/include/asm-x86/softirq.h |    3 +-
 5 files changed, 58 insertions(+), 34 deletions(-)
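
Before the diff proper, here is a minimal, self-contained C sketch of the
retry pattern the changeset relies on. It is not Xen code: all names here
(event_pending, kick_softirq, remote_kick, return_to_guest) are invented
for the example; the real logic lives in vcpu_kick(), vcpu_kick_softirq()
and the reordered entry.S paths below.

/*
 * Illustrative sketch only -- not part of the changeset.  It models why
 * the dummy VCPU_KICK_SOFTIRQ closes the race described above: if a kick
 * arrives after *_intr_assist() has sampled the pending state, the softirq
 * check on the (IRQ-disabled) exit path forces a retry.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool event_pending;   /* stands in for evtchn_upcall_pending */
static atomic_bool kick_softirq;    /* stands in for VCPU_KICK_SOFTIRQ     */

/* A remote CPU (or IRQ/IPI handler) delivering an event to the vCPU. */
static void remote_kick(void)
{
    atomic_store(&event_pending, true);
    atomic_store(&kick_softirq, true);   /* the dummy softirq */
}

/* Return-to-guest path, roughly mirroring the reordered entry.S. */
static void return_to_guest(void)
{
    do {
        atomic_store(&kick_softirq, false);

        /* intr_assist(): runs with IRQs enabled, may race remote_kick(). */
        if ( atomic_load(&event_pending) )
            printf("inject virtual interrupt\n");

        /*
         * IRQs now disabled (cli/CLGI in the real code).  If a kick slipped
         * in after intr_assist() looked at the pending state, the softirq
         * flag is set and we go round again instead of entering the guest.
         */
    } while ( atomic_load(&kick_softirq) );

    printf("enter guest\n");
}

int main(void)
{
    remote_kick();
    return_to_guest();
    return 0;
}

Note that the softirq handler itself does nothing: its only job is to make
the pending-softirq check on the exit path come back true, which is exactly
why vcpu_kick_softirq() in the patch is an empty function.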

diff -r 50fc79012db7 -r 4941c5a14598 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Thu Oct 23 11:20:44 2008 +0100
+++ b/xen/arch/x86/domain.c     Thu Oct 23 11:40:59 2008 +0100
@@ -1892,6 +1892,54 @@ void domain_cpuid(
     *eax = *ebx = *ecx = *edx = 0;
 }
 
+void vcpu_kick(struct vcpu *v)
+{
+    /*
+     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
+     * pending flag. These values may fluctuate (after all, we hold no
+     * locks) but the key insight is that each change will cause
+     * evtchn_upcall_pending to be polled.
+     * 
+     * NB2. We save the running flag across the unblock to avoid a needless
+     * IPI for domains that we IPI'd to unblock.
+     */
+    bool_t running = v->is_running;
+    vcpu_unblock(v);
+    if ( running && (in_irq() || (v != current)) )
+        cpu_raise_softirq(v->processor, VCPU_KICK_SOFTIRQ);
+}
+
+void vcpu_mark_events_pending(struct vcpu *v)
+{
+    int already_pending = test_and_set_bit(
+        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+
+    if ( already_pending )
+        return;
+
+    if ( is_hvm_vcpu(v) )
+        hvm_assert_evtchn_irq(v);
+    else
+        vcpu_kick(v);
+}
+
+static void vcpu_kick_softirq(void)
+{
+    /*
+     * Nothing to do here: we merely prevent notifiers from racing with checks
+     * executed on return to guest context with interrupts enabled. See, for
+     * example, xxx_intr_assist() executed on return to HVM guest context.
+     */
+}
+
+static int __init init_vcpu_kick_softirq(void)
+{
+    open_softirq(VCPU_KICK_SOFTIRQ, vcpu_kick_softirq);
+    return 0;
+}
+__initcall(init_vcpu_kick_softirq);
+
+
 /*
  * Local variables:
  * mode: C
diff -r 50fc79012db7 -r 4941c5a14598 xen/arch/x86/hvm/svm/entry.S
--- a/xen/arch/x86/hvm/svm/entry.S      Thu Oct 23 11:20:44 2008 +0100
+++ b/xen/arch/x86/hvm/svm/entry.S      Thu Oct 23 11:40:59 2008 +0100
@@ -57,6 +57,8 @@
 #endif
 
 ENTRY(svm_asm_do_resume)
+        call svm_intr_assist
+
         get_current(bx)
         CLGI
 
@@ -67,7 +69,6 @@ ENTRY(svm_asm_do_resume)
         jnz  .Lsvm_process_softirqs
 
         call svm_asid_handle_vmrun
-        call svm_intr_assist
 
         cmpb $0,addr_of(tb_init_done)
         jnz  .Lsvm_trace
diff -r 50fc79012db7 -r 4941c5a14598 xen/arch/x86/hvm/vmx/entry.S
--- a/xen/arch/x86/hvm/vmx/entry.S      Thu Oct 23 11:20:44 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/entry.S      Thu Oct 23 11:40:59 2008 +0100
@@ -122,6 +122,8 @@ vmx_asm_vmexit_handler:
 
 .globl vmx_asm_do_vmentry
 vmx_asm_do_vmentry:
+        call vmx_intr_assist
+
         get_current(bx)
         cli
 
@@ -130,8 +132,6 @@ vmx_asm_do_vmentry:
         lea  addr_of(irq_stat),r(dx)
         cmpl $0,(r(dx),r(ax),1)
         jnz  .Lvmx_process_softirqs
-
-        call vmx_intr_assist
 
         testb $0xff,VCPU_vmx_emul(r(bx))
         jnz  .Lvmx_goto_realmode
@@ -179,11 +179,13 @@ vmx_asm_do_vmentry:
 
 /*.Lvmx_resume:*/
         VMRESUME
+        sti
         call vm_resume_fail
         ud2
 
 .Lvmx_launch:
         VMLAUNCH
+        sti
         call vm_launch_fail
         ud2
 
diff -r 50fc79012db7 -r 4941c5a14598 xen/include/asm-x86/event.h
--- a/xen/include/asm-x86/event.h       Thu Oct 23 11:20:44 2008 +0100
+++ b/xen/include/asm-x86/event.h       Thu Oct 23 11:40:59 2008 +0100
@@ -11,36 +11,8 @@
 
 #include <xen/shared.h>
 
-static inline void vcpu_kick(struct vcpu *v)
-{
-    /*
-     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
-     * pending flag. These values may fluctuate (after all, we hold no
-     * locks) but the key insight is that each change will cause
-     * evtchn_upcall_pending to be polled.
-     * 
-     * NB2. We save the running flag across the unblock to avoid a needless
-     * IPI for domains that we IPI'd to unblock.
-     */
-    int running = v->is_running;
-    vcpu_unblock(v);
-    if ( running )
-        smp_send_event_check_cpu(v->processor);
-}
-
-static inline void vcpu_mark_events_pending(struct vcpu *v)
-{
-    int already_pending = test_and_set_bit(
-        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
-
-    if ( already_pending )
-        return;
-
-    if ( is_hvm_vcpu(v) )
-        hvm_assert_evtchn_irq(v);
-    else
-        vcpu_kick(v);
-}
+void vcpu_kick(struct vcpu *v);
+void vcpu_mark_events_pending(struct vcpu *v);
 
 int hvm_local_events_need_delivery(struct vcpu *v);
 static inline int local_events_need_delivery(void)
diff -r 50fc79012db7 -r 4941c5a14598 xen/include/asm-x86/softirq.h
--- a/xen/include/asm-x86/softirq.h     Thu Oct 23 11:20:44 2008 +0100
+++ b/xen/include/asm-x86/softirq.h     Thu Oct 23 11:40:59 2008 +0100
@@ -3,7 +3,8 @@
 
 #define NMI_MCE_SOFTIRQ        (NR_COMMON_SOFTIRQS + 0)
 #define TIME_CALIBRATE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1)
+#define VCPU_KICK_SOFTIRQ      (NR_COMMON_SOFTIRQS + 2)
 
-#define NR_ARCH_SOFTIRQS       2
+#define NR_ARCH_SOFTIRQS       3
 
 #endif /* __ASM_SOFTIRQ_H__ */
