
[Xen-changelog] [xen-unstable] [XEN] Make per-cpu pending EOI interrupt info explicitly PER_CPU.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID ce619ad2d13994006779df1a18c625f6daf23ecd
# Parent  7e9699af7e1296c911a0102693872b59b0508e2c
[XEN] Make per-cpu pending EOI interrupt info explicitly PER_CPU.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/irq.c |   61 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 33 insertions(+), 28 deletions(-)

diff -r 7e9699af7e12 -r ce619ad2d139 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Tue Aug 08 14:18:57 2006 +0100
+++ b/xen/arch/x86/irq.c        Tue Aug 08 14:29:16 2006 +0100
@@ -160,11 +160,12 @@ typedef struct {
  * Stack of interrupts awaiting EOI on each CPU. These must be popped in
  * order, as only the current highest-priority pending irq can be EOIed.
  */
-static struct {
+struct pending_eoi {
     u8 vector; /* Vector awaiting EOI */
     u8 ready;  /* Ready for EOI now?  */
-} pending_eoi[NR_CPUS][NR_VECTORS] __cacheline_aligned;
-#define pending_eoi_sp(cpu) (pending_eoi[cpu][NR_VECTORS-1].vector)
+};
+static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
+#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)
 
 static void __do_IRQ_guest(int vector)
 {
@@ -172,7 +173,8 @@ static void __do_IRQ_guest(int vector)
     irq_desc_t         *desc = &irq_desc[vector];
     irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
     struct domain      *d;
-    int                 i, sp, cpu = smp_processor_id();
+    int                 i, sp;
+    struct pending_eoi *peoi = this_cpu(pending_eoi);
 
     if ( unlikely(action->nr_guests == 0) )
     {
@@ -185,13 +187,13 @@ static void __do_IRQ_guest(int vector)
 
     if ( action->ack_type == ACKTYPE_EOI )
     {
-        sp = pending_eoi_sp(cpu);
-        ASSERT((sp == 0) || (pending_eoi[cpu][sp-1].vector < vector));
+        sp = pending_eoi_sp(peoi);
+        ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
         ASSERT(sp < (NR_VECTORS-1));
-        pending_eoi[cpu][sp].vector = vector;
-        pending_eoi[cpu][sp].ready = 0;
-        pending_eoi_sp(cpu) = sp+1;
-        cpu_set(cpu, action->cpu_eoi_map);
+        peoi[sp].vector = vector;
+        peoi[sp].ready = 0;
+        pending_eoi_sp(peoi) = sp+1;
+        cpu_set(smp_processor_id(), action->cpu_eoi_map);
     }
 
     for ( i = 0; i < action->nr_guests; i++ )
@@ -207,43 +209,45 @@ static void __do_IRQ_guest(int vector)
 /* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
 static void flush_ready_eoi(void *unused)
 {
-    irq_desc_t *desc;
-    int         vector, sp, cpu = smp_processor_id();
+    struct pending_eoi *peoi = this_cpu(pending_eoi);
+    irq_desc_t         *desc;
+    int                 vector, sp;
 
     ASSERT(!local_irq_is_enabled());
 
-    sp = pending_eoi_sp(cpu);
-
-    while ( (--sp >= 0) && pending_eoi[cpu][sp].ready )
-    {
-        vector = pending_eoi[cpu][sp].vector;
+    sp = pending_eoi_sp(peoi);
+
+    while ( (--sp >= 0) && peoi[sp].ready )
+    {
+        vector = peoi[sp].vector;
         desc = &irq_desc[vector];
         spin_lock(&desc->lock);
         desc->handler->end(vector);
         spin_unlock(&desc->lock);
     }
 
-    pending_eoi_sp(cpu) = sp+1;
+    pending_eoi_sp(peoi) = sp+1;
 }
 
 static void __set_eoi_ready(irq_desc_t *desc)
 {
     irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
-    int                 vector, sp, cpu = smp_processor_id();
+    struct pending_eoi *peoi = this_cpu(pending_eoi);
+    int                 vector, sp;
 
     vector = desc - irq_desc;
 
     if ( !(desc->status & IRQ_GUEST) ||
          (action->in_flight != 0) ||
-         !cpu_test_and_clear(cpu, action->cpu_eoi_map) )
+         !cpu_test_and_clear(smp_processor_id(), action->cpu_eoi_map) )
         return;
 
-    sp = pending_eoi_sp(cpu);
+    sp = pending_eoi_sp(peoi);
     do {
         ASSERT(sp > 0);
-    } while ( pending_eoi[cpu][--sp].vector != vector );
-    ASSERT(!pending_eoi[cpu][sp].ready);
-    pending_eoi[cpu][sp].ready = 1;
+    } while ( peoi[--sp].vector != vector );
+    ASSERT(!peoi[sp].ready);
+    peoi[sp].ready = 1;
 }
 
 /* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
@@ -269,16 +273,17 @@ static void flush_all_pending_eoi(void *
 {
     irq_desc_t         *desc;
     irq_guest_action_t *action;
-    int                 i, vector, sp, cpu = smp_processor_id();
+    struct pending_eoi *peoi = this_cpu(pending_eoi);
+    int                 i, vector, sp;
 
     ASSERT(!local_irq_is_enabled());
 
-    sp = pending_eoi_sp(cpu);
+    sp = pending_eoi_sp(peoi);
     while ( --sp >= 0 )
     {
-        if ( pending_eoi[cpu][sp].ready )
+        if ( peoi[sp].ready )
             continue;
-        vector = pending_eoi[cpu][sp].vector;
+        vector = peoi[sp].vector;
         desc = &irq_desc[vector];
         spin_lock(&desc->lock);
         action = (irq_guest_action_t *)desc->action;

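For readers less familiar with Xen's per-cpu infrastructure, here is a minimal sketch of the conversion pattern the patch applies: the open-coded pending_eoi[NR_CPUS][NR_VECTORS] table becomes a DEFINE_PER_CPU declaration, and each function fetches a pointer to this CPU's copy once via this_cpu() instead of indexing by smp_processor_id() on every access. This is a simplified illustration built from the macros visible in the diff, not a compilable excerpt of xen/arch/x86/irq.c; the helper pop_ready_eois_sketch() is hypothetical and exists only for the example.

    /* Per-CPU stack of interrupts awaiting EOI, one copy per CPU. */
    struct pending_eoi {
        u8 vector; /* vector awaiting EOI */
        u8 ready;  /* ready for EOI now?  */
    };
    static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);

    /* The stack pointer is stashed in the 'vector' field of the last slot. */
    #define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)

    /* Hypothetical helper showing the access pattern used throughout the
     * patch: take a local pointer to this CPU's copy once, then index it
     * directly, rather than writing pending_eoi[cpu][...] everywhere. */
    static void pop_ready_eois_sketch(void)
    {
        struct pending_eoi *peoi = this_cpu(pending_eoi);
        int sp = pending_eoi_sp(peoi);

        /* Pop entries marked ready, as flush_ready_eoi() does. */
        while ( (--sp >= 0) && peoi[sp].ready )
            /* EOI peoi[sp].vector at the interrupt controller here. */;

        pending_eoi_sp(peoi) = sp + 1;
    }
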
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog