
[Xen-changelog] [xen-unstable] [LINUX] Disallow nested event delivery.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1166612996 0
# Node ID 3a28be71b667a336c7589cbb7056841f9e42df6a
# Parent  516e4faac066437af4b41014da831d2ad8ae0493
[LINUX] Disallow nested event delivery.

This eliminates the risk of overflowing the kernel stack and is a
reasonable policy given that we have no concept of priorities among
event sources. (A standalone sketch of the guard pattern follows the
diff below.)

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/core/evtchn.c |   51 +++++++++++++++----------
 1 files changed, 32 insertions(+), 19 deletions(-)

diff -r 516e4faac066 -r 3a28be71b667 linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c    Wed Dec 20 10:41:33 2006 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c    Wed Dec 20 11:09:56 2006 +0000
@@ -208,38 +208,51 @@ void force_evtchn_callback(void)
 /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
 EXPORT_SYMBOL(force_evtchn_callback);
 
+static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
+
 /* NB. Interrupts are disabled on entry. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 {
        unsigned long  l1, l2;
-       unsigned int   l1i, l2i, port;
+       unsigned int   l1i, l2i, port, count;
        int            irq, cpu = smp_processor_id();
        shared_info_t *s = HYPERVISOR_shared_info;
        vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
 
-       vcpu_info->evtchn_upcall_pending = 0;
+       do {
+               /* Avoid a callback storm when we reenable delivery. */
+               vcpu_info->evtchn_upcall_pending = 0;
+
+               /* Nested invocations bail immediately. */
+               if (unlikely(per_cpu(upcall_count, cpu)++))
+                       return;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
-       /* Clear master pending flag /before/ clearing selector flag. */
-       rmb();
+               /* Clear master flag /before/ clearing selector flag. */
+               rmb();
 #endif
-       l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-       while (l1 != 0) {
-               l1i = __ffs(l1);
-               l1 &= ~(1UL << l1i);
-
-               while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-                       l2i = __ffs(l2);
-
-                       port = (l1i * BITS_PER_LONG) + l2i;
-                       if ((irq = evtchn_to_irq[port]) != -1)
-                               do_IRQ(irq, regs);
-                       else {
-                               exit_idle();
-                               evtchn_device_upcall(port);
+               l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+               while (l1 != 0) {
+                       l1i = __ffs(l1);
+                       l1 &= ~(1UL << l1i);
+
+                       while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+                               l2i = __ffs(l2);
+
+                               port = (l1i * BITS_PER_LONG) + l2i;
+                               if ((irq = evtchn_to_irq[port]) != -1)
+                                       do_IRQ(irq, regs);
+                               else {
+                                       exit_idle();
+                                       evtchn_device_upcall(port);
+                               }
                        }
                }
-       }
+
+               /* If there were nested callbacks then we have more to do. */
+               count = per_cpu(upcall_count, cpu);
+               per_cpu(upcall_count, cpu) = 0;
+       } while (unlikely(count != 1));
 }
 
 static int find_unbound_irq(void)

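For readers tracing the new control flow, here is a minimal, standalone
userspace sketch of the same reentrancy-guard pattern. This is
illustrative only, not the kernel code: handle_upcall() stands in for
evtchn_do_upcall(), process_pending() and the pending/injected globals
are hypothetical stand-ins for the event-channel bitmap scan, and the
per-CPU upcall_count becomes a plain global.

#include <stdio.h>

static unsigned int upcall_count;  /* per-CPU in the real code */
static int pending = 3;            /* fake count of pending events */
static int injected;               /* inject one nested upcall for the demo */

static void handle_upcall(void);

/* Stand-in for the event scan; re-triggers delivery once mid-scan. */
static void process_pending(void)
{
        while (pending > 0) {
                printf("processing an event, %d remaining\n", --pending);
                if (!injected) {
                        injected = 1;
                        pending++;        /* a new event arrives mid-scan... */
                        handle_upcall();  /* ...and raises a nested upcall */
                }
        }
}

static void handle_upcall(void)
{
        unsigned int count;

        do {
                /*
                 * Nested invocations bail immediately: only the outermost
                 * frame does any work, so the stack cannot grow without
                 * bound no matter how often delivery re-triggers.
                 */
                if (upcall_count++)
                        return;

                process_pending();

                /*
                 * count > 1 means a nested upcall arrived while we were
                 * scanning, so new events may be pending: scan again.
                 */
                count = upcall_count;
                upcall_count = 0;
        } while (count != 1);
}

int main(void)
{
        handle_upcall();
        return 0;
}

The property the patch relies on is visible here: the outermost
invocation alone walks the pending events, nested invocations merely
record that they happened, and the do/while loop rescans until a full
pass completes with no nested arrival (count == 1). The real handler
additionally clears vcpu_info->evtchn_upcall_pending at the top of each
pass before processing, which this sketch omits.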
