
[Xen-changelog] Clean up event-channel notification code in Xen.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 9d0120a5aa452049ae78488fb990c31a8b973fe8
# Parent  37ac3cf335066dfe0ab67809ac2809fede081cea
Clean up event-channel notification code in Xen.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 37ac3cf33506 -r 9d0120a5aa45 xen/arch/ia64/vcpu.c
--- a/xen/arch/ia64/vcpu.c      Thu Aug 11 16:23:54 2005
+++ b/xen/arch/ia64/vcpu.c      Thu Aug 11 16:48:36 2005
@@ -587,6 +587,14 @@
        set_bit(vector,PSCBX(vcpu,irr));
        PSCB(vcpu,pending_interruption) = 1;
     }
+
+    /* Keir: I think you should unblock when an interrupt is pending. */
+    {
+        int running = test_bit(_VCPUF_running, &vcpu->vcpu_flags);
+        vcpu_unblock(vcpu);
+        if ( running )
+            smp_send_event_check_cpu(vcpu->processor);
+    }
 }
 
 void early_tick(VCPU *vcpu)
diff -r 37ac3cf33506 -r 9d0120a5aa45 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Thu Aug 11 16:23:54 2005
+++ b/xen/arch/x86/mm.c Thu Aug 11 16:48:36 2005
@@ -95,6 +95,7 @@
 #include <xen/irq.h>
 #include <xen/softirq.h>
 #include <xen/domain_page.h>
+#include <xen/event.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
 #include <asm/flushtlb.h>
diff -r 37ac3cf33506 -r 9d0120a5aa45 xen/include/asm-x86/event.h
--- a/xen/include/asm-x86/event.h       Thu Aug 11 16:23:54 2005
+++ b/xen/include/asm-x86/event.h       Thu Aug 11 16:48:36 2005
@@ -11,6 +11,19 @@
 
 static inline void evtchn_notify(struct vcpu *v)
 {
+    /*
+     * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+     * pending flag. These values may fluctuate (after all, we hold no
+     * locks) but the key insight is that each change will cause
+     * evtchn_upcall_pending to be polled.
+     * 
+     * NB2. We save VCPUF_running across the unblock to avoid a needless
+     * IPI for domains that we IPI'd to unblock.
+     */
+    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+    vcpu_unblock(v);
+    if ( running )
+        smp_send_event_check_cpu(v->processor);
 }
 
 #endif
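
The new evtchn_notify() above can be read in isolation. Below is a minimal sketch of the same ordering, with hypothetical stand-ins (toy_vcpu, toy_vcpu_unblock, toy_send_event_check) for the real hypervisor primitives; only the order of the three steps is the point, not the stub bodies.

    #include <stdio.h>

    /* Hypothetical stand-ins for the hypervisor primitives used above. */
    #define TOY_VCPUF_RUNNING (1u << 0)        /* mirrors _VCPUF_running */

    struct toy_vcpu {
        unsigned int flags;                    /* mirrors vcpu_flags */
        int          processor;                /* mirrors v->processor */
    };

    static void toy_vcpu_unblock(struct toy_vcpu *v)
    {
        /* Stand-in for vcpu_unblock(): wake the vCPU if it was blocked. */
        printf("unblock vcpu on cpu %d\n", v->processor);
    }

    static void toy_send_event_check(int cpu)
    {
        /* Stand-in for smp_send_event_check_cpu(): IPI so the running
         * vCPU re-polls evtchn_upcall_pending. */
        printf("IPI cpu %d\n", cpu);
    }

    static void toy_notify(struct toy_vcpu *v)
    {
        /* Sample 'running' before the unblock, as the patch does: a vCPU
         * we had to unblock will be scheduled anyway and needs no IPI;
         * one that was already running must be poked so it notices the
         * newly set pending bit. */
        int running = (v->flags & TOY_VCPUF_RUNNING) != 0;

        toy_vcpu_unblock(v);
        if (running)
            toy_send_event_check(v->processor);
    }

    int main(void)
    {
        struct toy_vcpu v = { .flags = TOY_VCPUF_RUNNING, .processor = 2 };
        toy_notify(&v);
        return 0;
    }
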
diff -r 37ac3cf33506 -r 9d0120a5aa45 xen/include/xen/event.h
--- a/xen/include/xen/event.h   Thu Aug 11 16:23:54 2005
+++ b/xen/include/xen/event.h   Thu Aug 11 16:48:36 2005
@@ -26,30 +26,14 @@
 {
     struct domain *d = v->domain;
     shared_info_t *s = d->shared_info;
-    int            running;
 
-    /* These three operations must happen in strict order. */
+    /* These four operations must happen in strict order. */
     if ( !test_and_set_bit(port,    &s->evtchn_pending[0]) &&
          !test_bit        (port,    &s->evtchn_mask[0])    &&
-         !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) )
+         !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) &&
+         !test_and_set_bit(0,       &v->vcpu_info->evtchn_upcall_pending) )
     {
-        /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
-        set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
         evtchn_notify(v);
-
-        /*
-         * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
-         * pending flag. These values may fluctuate (after all, we hold no
-         * locks) but the key insight is that each change will cause
-         * evtchn_upcall_pending to be polled.
-         * 
-         * NB2. We save VCPUF_running across the unblock to avoid a needless
-         * IPI for domains that we IPI'd to unblock.
-         */
-        running = test_bit(_VCPUF_running, &v->vcpu_flags);
-        vcpu_unblock(v);
-        if ( running )
-            smp_send_event_check_cpu(v->processor);
     }
 }
 
@@ -73,8 +57,9 @@
  */
 extern void send_guest_pirq(struct domain *d, int pirq);
 
-#define event_pending(_d)                                     \
-    ((_d)->vcpu_info->evtchn_upcall_pending && \
-     !(_d)->vcpu_info->evtchn_upcall_mask)
+/* Note: Bitwise operations result in fast code with no branches. */
+#define event_pending(v)                        \
+    ((v)->vcpu_info->evtchn_upcall_pending &    \
+     ~(v)->vcpu_info->evtchn_upcall_mask)
 
 #endif /* __XEN_EVENT_H__ */
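
The note on event_pending() above deserves a concrete reading. Here is a small, hedged comparison in plain C on byte-sized fields (the helper names are illustrative, not Xen's): the logical form short-circuits and typically compiles to a conditional branch, while the bitwise form always reads both bytes and yields a non-zero value exactly when the event is pending and unmasked. This relies on the upcall fields only ever holding 0 or 1.

    #include <stdint.h>

    /* Illustrative helpers only; field values are assumed to be 0 or 1,
     * as in the shared-info upcall fields. */

    /* Old form: '&&' and '!' short-circuit, so the compiler usually emits
     * a conditional branch before the mask byte is even loaded. */
    static inline int pending_logical(uint8_t upcall_pending, uint8_t upcall_mask)
    {
        return upcall_pending && !upcall_mask;
    }

    /* New form: bitwise AND with the complemented mask byte; both bytes
     * are always read and no branch is required.  A non-zero result
     * means "pending and not masked". */
    static inline int pending_bitwise(uint8_t upcall_pending, uint8_t upcall_mask)
    {
        return upcall_pending & (uint8_t)~upcall_mask;
    }
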
diff -r 37ac3cf33506 -r 9d0120a5aa45 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Thu Aug 11 16:23:54 2005
+++ b/xen/include/xen/sched.h   Thu Aug 11 16:48:36 2005
@@ -297,10 +297,9 @@
         (unsigned long)(_a1), (unsigned long)(_a2), (unsigned long)(_a3), \
         (unsigned long)(_a4), (unsigned long)(_a5), (unsigned long)(_a6))
 
-#define hypercall_preempt_check() (unlikely(            \
-        softirq_pending(smp_processor_id()) |           \
-        (!!current->vcpu_info->evtchn_upcall_pending &  \
-          !current->vcpu_info->evtchn_upcall_mask)      \
+#define hypercall_preempt_check() (unlikely(    \
+        softirq_pending(smp_processor_id()) |   \
+        event_pending(current)                  \
     ))
 
 /* This domain_hash and domain_list are protected by the domlist_lock. */
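
With event_pending() factored out, hypercall_preempt_check() reads as: a softirq is pending on this CPU, or the current vCPU has an unmasked event pending. Its usual role (an assumption about call sites, none of which appear in this patch) is inside long-running hypercalls, which periodically test it and bail out so the guest can take the event, re-entering the operation later. A sketch with stand-in names follows; in the hypervisor proper the unfinished work would be repackaged as a continuation rather than returned directly.

    /* Hypothetical long-running operation; only the preemption pattern is
     * the point.  toy_preempt_pending() stands in for
     * hypercall_preempt_check(). */
    #define TOY_BATCH 64

    static int  toy_preempt_pending(void)        { return 0; }
    static void toy_process_one(unsigned long i) { (void)i; }

    /* Returns 0 when done, or the index at which to resume. */
    long toy_do_big_op(unsigned long start, unsigned long count)
    {
        unsigned long i;

        for (i = start; i < count; i++) {
            /* Re-check every TOY_BATCH items: if a softirq or an unmasked
             * event is pending, stop and tell the caller where to resume. */
            if (i != start && ((i - start) % TOY_BATCH) == 0 &&
                toy_preempt_pending())
                return (long)i;

            toy_process_one(i);
        }
        return 0;
    }
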
