[Xen-changelog] [xen-unstable] pass struct irq_desc * to all other IRQ accessors



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1316301957 -3600
# Node ID 2dab09bcec8136a5962e71beed1f5dc06275a6b1
# Parent  5f984c6735c96328f018d87b7e1b2025c3847d70
pass struct irq_desc * to all other IRQ accessors

This is again because the descriptor is generally more useful (the
IRQ number remains accessible through it if necessary), and going
forward it will hopefully allow all direct accesses to the IRQ
descriptor array to be removed, in turn making it possible to replace
the array with some other, more efficient data structure.
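
For illustration, a minimal standalone sketch of the calling-convention
change (simplified stand-in types and invented function names, not the
real Xen definitions):

    /* Simplified stand-ins -- the real irq_desc carries much more state. */
    #include <stdio.h>

    struct irq_desc {
        int irq;                /* IRQ number, still reachable via the desc */
        unsigned int status;
    };

    /* Old style: only the number; accessors had to index irq_desc[]
     * themselves whenever they needed more than the number. */
    static void mask_irq_old(unsigned int irq)
    {
        printf("masking IRQ %u\n", irq);
    }

    /* New style: the descriptor itself; the number is desc->irq. */
    static void mask_irq_new(struct irq_desc *desc)
    {
        printf("masking IRQ %d\n", desc->irq);
    }

    int main(void)
    {
        struct irq_desc d = { .irq = 9, .status = 0 };

        mask_irq_old(d.irq);
        mask_irq_new(&d);
        return 0;
    }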

This additionally makes the .end() accessor optional, since in a
number of cases the functions were empty.
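
With .end now optional, call sites guard the invocation, as the do_IRQ()
changes below do.  A minimal standalone sketch of that pattern (again
with simplified stand-in types; end_irq() and edge_type are invented
purely for illustration):

    #include <stdint.h>
    #include <stdio.h>

    struct irq_desc;

    struct hw_interrupt_type {
        const char *typename;
        void (*ack)(struct irq_desc *);
        void (*end)(struct irq_desc *, uint8_t vector);  /* may be NULL now */
    };

    struct irq_desc {
        int irq;
        const struct hw_interrupt_type *handler;
    };

    static void ack_noop(struct irq_desc *desc) { (void)desc; }

    /* A controller that, like several after this change, has no .end hook. */
    static const struct hw_interrupt_type edge_type = {
        .typename = "example-edge",
        .ack      = ack_noop,
        /* .end deliberately left NULL */
    };

    static void end_irq(struct irq_desc *desc, uint8_t vector)
    {
        if ( desc->handler->end )            /* optional: call only if set */
            desc->handler->end(desc, vector);
        else
            printf("IRQ %d: no .end hook, nothing to do\n", desc->irq);
    }

    int main(void)
    {
        struct irq_desc d = { .irq = 3, .handler = &edge_type };

        end_irq(&d, 0x30);
        return 0;
    }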

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 5f984c6735c9 -r 2dab09bcec81 xen/arch/ia64/linux-xen/iosapic.c
--- a/xen/arch/ia64/linux-xen/iosapic.c Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/arch/ia64/linux-xen/iosapic.c Sun Sep 18 00:25:57 2011 +0100
@@ -276,7 +276,7 @@
 }
 
 static void
-nop (unsigned int vector)
+nop (struct irq_desc *desc)
 {
        /* do nothing... */
 }
@@ -300,13 +300,13 @@
 }
 
 static void
-mask_irq (unsigned int irq)
+mask_irq (struct irq_desc *desc)
 {
        unsigned long flags;
        char __iomem *addr;
        u32 low32;
        int rte_index;
-       ia64_vector vec = irq_to_vector(irq);
+       ia64_vector vec = irq_to_vector(desc->irq);
        struct iosapic_rte_info *rte;
 
        if (list_empty(&iosapic_intr_info[vec].rtes))
@@ -326,13 +326,13 @@
 }
 
 static void
-unmask_irq (unsigned int irq)
+unmask_irq (struct irq_desc *desc)
 {
        unsigned long flags;
        char __iomem *addr;
        u32 low32;
        int rte_index;
-       ia64_vector vec = irq_to_vector(irq);
+       ia64_vector vec = irq_to_vector(desc->irq);
        struct iosapic_rte_info *rte;
 
        if (list_empty(&iosapic_intr_info[vec].rtes))
@@ -408,19 +408,19 @@
  */
 
 static unsigned int
-iosapic_startup_level_irq (unsigned int irq)
+iosapic_startup_level_irq (struct irq_desc *desc)
 {
-       unmask_irq(irq);
+       unmask_irq(desc);
        return 0;
 }
 
 static void
-iosapic_end_level_irq (unsigned int irq)
+iosapic_end_level_irq (struct irq_desc *desc)
 {
-       ia64_vector vec = irq_to_vector(irq);
+       ia64_vector vec = irq_to_vector(desc->irq);
        struct iosapic_rte_info *rte;
 
-       move_irq(irq);
+       move_irq(desc->irq);
        list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
                iosapic_eoi(rte->addr, vec);
 }
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/arch/x86/hpet.c       Sun Sep 18 00:25:57 2011 +0100
@@ -49,9 +49,6 @@
 
 DEFINE_PER_CPU(struct hpet_event_channel *, cpu_bc_channel);
 
-static unsigned int *__read_mostly irq_channel;
-#define irq_to_channel(irq)   irq_channel[irq]
-
 unsigned long __read_mostly hpet_address;
 
 /*
@@ -232,26 +229,20 @@
     ch->event_handler(ch);
 }
 
-static void hpet_msi_unmask(unsigned int irq)
+static void hpet_msi_unmask(struct irq_desc *desc)
 {
     u32 cfg;
-    unsigned int ch_idx = irq_to_channel(irq);
-    struct hpet_event_channel *ch = hpet_events + ch_idx;
-
-    BUG_ON(ch_idx >= num_hpets_used);
+    struct hpet_event_channel *ch = desc->action->dev_id;
 
     cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
     cfg |= HPET_TN_FSB;
     hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
 }
 
-static void hpet_msi_mask(unsigned int irq)
+static void hpet_msi_mask(struct irq_desc *desc)
 {
     u32 cfg;
-    unsigned int ch_idx = irq_to_channel(irq);
-    struct hpet_event_channel *ch = hpet_events + ch_idx;
-
-    BUG_ON(ch_idx >= num_hpets_used);
+    struct hpet_event_channel *ch = desc->action->dev_id;
 
     cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
     cfg &= ~HPET_TN_FSB;
@@ -271,30 +262,21 @@
     msg->address_hi = 0;
 }
 
-static unsigned int hpet_msi_startup(unsigned int irq)
+static unsigned int hpet_msi_startup(struct irq_desc *desc)
 {
-    hpet_msi_unmask(irq);
+    hpet_msi_unmask(desc);
     return 0;
 }
 
-static void hpet_msi_shutdown(unsigned int irq)
+#define hpet_msi_shutdown hpet_msi_mask
+
+static void hpet_msi_ack(struct irq_desc *desc)
 {
-    hpet_msi_mask(irq);
-}
-
-static void hpet_msi_ack(unsigned int irq)
-{
-    struct irq_desc *desc = irq_to_desc(irq);
-
     irq_complete_move(desc);
-    move_native_irq(irq);
+    move_native_irq(desc);
     ack_APIC_irq();
 }
 
-static void hpet_msi_end(unsigned int irq, u8 vector)
-{
-}
-
 static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
     struct msi_msg msg;
@@ -323,7 +305,6 @@
     .enable        = hpet_msi_unmask,
     .disable    = hpet_msi_mask,
     .ack        = hpet_msi_ack,
-    .end        = hpet_msi_end,
     .set_affinity   = hpet_msi_set_affinity,
 };
 
@@ -335,14 +316,13 @@
     hpet_msi_write(desc->action->dev_id, &msg);
 }
 
-static int __init hpet_setup_msi_irq(unsigned int irq)
+static int __init hpet_setup_msi_irq(unsigned int irq, struct hpet_event_channel *ch)
 {
     int ret;
     irq_desc_t *desc = irq_to_desc(irq);
 
     desc->handler = &hpet_msi_type;
-    ret = request_irq(irq, hpet_interrupt_handler,
-                      0, "HPET", hpet_events + irq_channel[irq]);
+    ret = request_irq(irq, hpet_interrupt_handler, 0, "HPET", ch);
     if ( ret < 0 )
         return ret;
 
@@ -358,12 +338,9 @@
     if ( (irq = create_irq()) < 0 )
         return irq;
 
-    irq_channel[irq] = idx;
-
-    if ( hpet_setup_msi_irq(irq) )
+    if ( hpet_setup_msi_irq(irq, hpet_events + idx) )
     {
         destroy_irq(irq);
-        irq_channel[irq] = -1;
         return -EINVAL;
     }
 
@@ -511,11 +488,6 @@
     if ( hpet_rate == 0 )
         return;
 
-    irq_channel = xmalloc_array(unsigned int, nr_irqs);
-    BUG_ON(irq_channel == NULL);
-    for ( i = 0; i < nr_irqs; i++ )
-        irq_channel[i] = -1;
-
     cfg = hpet_read32(HPET_CFG);
 
     hpet_fsb_cap_lookup();
@@ -527,9 +499,6 @@
     }
     else
     {
-        xfree(irq_channel);
-        irq_channel = NULL;
-
         hpet_id = hpet_read32(HPET_ID);
         if ( !(hpet_id & HPET_ID_LEGSUP) )
             return;
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/arch/x86/i8259.c
--- a/xen/arch/x86/i8259.c      Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/arch/x86/i8259.c      Sun Sep 18 00:25:57 2011 +0100
@@ -85,18 +85,18 @@
 
 static DEFINE_SPINLOCK(i8259A_lock);
 
-static void mask_and_ack_8259A_irq(unsigned int irq);
+static void mask_and_ack_8259A_irq(struct irq_desc *);
 
-static unsigned int startup_8259A_irq(unsigned int irq)
+static unsigned int startup_8259A_irq(struct irq_desc *desc)
 {
-    enable_8259A_irq(irq);
+    enable_8259A_irq(desc);
     return 0; /* never anything pending */
 }
 
-static void end_8259A_irq(unsigned int irq, u8 vector)
+static void end_8259A_irq(struct irq_desc *desc, u8 vector)
 {
-    if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-        enable_8259A_irq(irq);
+    if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+        enable_8259A_irq(desc);
 }
 
 static struct hw_interrupt_type __read_mostly i8259A_irq_type = {
@@ -133,28 +133,28 @@
  */
 unsigned int __read_mostly io_apic_irqs;
 
-void disable_8259A_irq(unsigned int irq)
+void disable_8259A_irq(struct irq_desc *desc)
 {
-    unsigned int mask = 1 << irq;
+    unsigned int mask = 1 << desc->irq;
     unsigned long flags;
 
     spin_lock_irqsave(&i8259A_lock, flags);
     cached_irq_mask |= mask;
-    if (irq & 8)
+    if (desc->irq & 8)
         outb(cached_A1,0xA1);
     else
         outb(cached_21,0x21);
     spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
-void enable_8259A_irq(unsigned int irq)
+void enable_8259A_irq(struct irq_desc *desc)
 {
-    unsigned int mask = ~(1 << irq);
+    unsigned int mask = ~(1 << desc->irq);
     unsigned long flags;
 
     spin_lock_irqsave(&i8259A_lock, flags);
     cached_irq_mask &= mask;
-    if (irq & 8)
+    if (desc->irq & 8)
         outb(cached_A1,0xA1);
     else
         outb(cached_21,0x21);
@@ -226,9 +226,9 @@
  * first, _then_ send the EOI, and the order of EOI
  * to the two 8259s is important!
  */
-static void mask_and_ack_8259A_irq(unsigned int irq)
+static void mask_and_ack_8259A_irq(struct irq_desc *desc)
 {
-    unsigned int irqmask = 1 << irq;
+    unsigned int irqmask = 1 << desc->irq;
     unsigned long flags;
 
     spin_lock_irqsave(&i8259A_lock, flags);
@@ -252,15 +252,15 @@
     cached_irq_mask |= irqmask;
 
  handle_real_irq:
-    if (irq & 8) {
+    if (desc->irq & 8) {
         inb(0xA1);              /* DUMMY - (do we need this?) */
         outb(cached_A1,0xA1);
-        outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
+        outb(0x60 + (desc->irq & 7), 0xA0);/* 'Specific EOI' to slave */
         outb(0x62,0x20);        /* 'Specific EOI' to master-IRQ2 */
     } else {
         inb(0x21);              /* DUMMY - (do we need this?) */
         outb(cached_21,0x21);
-        outb(0x60+irq,0x20);    /* 'Specific EOI' to master */
+        outb(0x60 + desc->irq, 0x20);/* 'Specific EOI' to master */
     }
     spin_unlock_irqrestore(&i8259A_lock, flags);
     return;
@@ -269,7 +269,7 @@
     /*
      * this is the slow path - should happen rarely.
      */
-    if (i8259A_irq_real(irq))
+    if (i8259A_irq_real(desc->irq))
         /*
          * oops, the IRQ _is_ in service according to the
          * 8259A - not spurious, go handle it.
@@ -283,7 +283,7 @@
          * lets ACK and report it. [once per IRQ]
          */
         if (!(spurious_irq_mask & irqmask)) {
-            printk("spurious 8259A interrupt: IRQ%d.\n", irq);
+            printk("spurious 8259A interrupt: IRQ%d.\n", desc->irq);
             spurious_irq_mask |= irqmask;
         }
         /*
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/arch/x86/io_apic.c    Sun Sep 18 00:25:57 2011 +0100
@@ -436,21 +436,21 @@
     __modify_IO_APIC_irq(irq, 0x00008000, 0);
 }
 
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq(struct irq_desc *desc)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&ioapic_lock, flags);
-    __mask_IO_APIC_irq(irq);
+    __mask_IO_APIC_irq(desc->irq);
     spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq(struct irq_desc *desc)
 {
     unsigned long flags;
 
     spin_lock_irqsave(&ioapic_lock, flags);
-    __unmask_IO_APIC_irq(irq);
+    __unmask_IO_APIC_irq(desc->irq);
     spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -1145,7 +1145,7 @@
                 ioapic_register_intr(irq, IOAPIC_AUTO);
 
                 if (!apic && platform_legacy_irq(irq))
-                    disable_8259A_irq(irq);
+                    disable_8259A_irq(irq_to_desc(irq));
             }
             cfg = irq_cfg(irq);
             SET_DEST(entry.dest.dest32, entry.dest.logical.logical_dest,
@@ -1170,7 +1170,7 @@
 
     memset(&entry,0,sizeof(entry));
 
-    disable_8259A_irq(0);
+    disable_8259A_irq(irq_to_desc(0));
 
     /* mask LVT0 */
     apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
@@ -1199,7 +1199,7 @@
      */
     ioapic_write_entry(apic, pin, 0, entry);
 
-    enable_8259A_irq(0);
+    enable_8259A_irq(irq_to_desc(0));
 }
 
 static inline void UNEXPECTED_IO_APIC(void)
@@ -1627,18 +1627,18 @@
  * This is not complete - we should be able to fake
  * an edge even if it isn't on the 8259A...
  */
-static unsigned int startup_edge_ioapic_irq(unsigned int irq)
+static unsigned int startup_edge_ioapic_irq(struct irq_desc *desc)
 {
     int was_pending = 0;
     unsigned long flags;
 
     spin_lock_irqsave(&ioapic_lock, flags);
-    if (platform_legacy_irq(irq)) {
-        disable_8259A_irq(irq);
-        if (i8259A_irq_pending(irq))
+    if (platform_legacy_irq(desc->irq)) {
+        disable_8259A_irq(desc);
+        if (i8259A_irq_pending(desc->irq))
             was_pending = 1;
     }
-    __unmask_IO_APIC_irq(irq);
+    __unmask_IO_APIC_irq(desc->irq);
     spin_unlock_irqrestore(&ioapic_lock, flags);
 
     return was_pending;
@@ -1649,16 +1649,14 @@
  * interrupt for real. This prevents IRQ storms from unhandled
  * devices.
  */
-static void ack_edge_ioapic_irq(unsigned int irq)
+static void ack_edge_ioapic_irq(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-    
     irq_complete_move(desc);
-    move_native_irq(irq);
+    move_native_irq(desc);
 
     if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
         == (IRQ_PENDING | IRQ_DISABLED))
-        mask_IO_APIC_irq(irq);
+        mask_IO_APIC_irq(desc);
     ack_APIC_irq();
 }
 
@@ -1676,9 +1674,9 @@
  * generic IRQ layer and by the fact that an unacked local
  * APIC does not accept IRQs.
  */
-static unsigned int startup_level_ioapic_irq (unsigned int irq)
+static unsigned int startup_level_ioapic_irq(struct irq_desc *desc)
 {
-    unmask_IO_APIC_irq(irq);
+    unmask_IO_APIC_irq(desc);
 
     return 0; /* don't check for pending */
 }
@@ -1726,11 +1724,10 @@
     return 0;
 }
 
-static void mask_and_ack_level_ioapic_irq (unsigned int irq)
+static void mask_and_ack_level_ioapic_irq(struct irq_desc *desc)
 {
     unsigned long v;
     int i;
-    struct irq_desc *desc = irq_to_desc(irq);
 
     irq_complete_move(desc);
 
@@ -1738,7 +1735,7 @@
         return;
 
     if ( !directed_eoi_enabled )
-        mask_IO_APIC_irq(irq);
+        mask_IO_APIC_irq(desc);
 
 /*
  * It appears there is an erratum which affects at least version 0x11
@@ -1759,7 +1756,7 @@
  * operation to prevent an edge-triggered interrupt escaping meanwhile.
  * The idea is from Manfred Spraul.  --macro
  */
-    i = IO_APIC_VECTOR(irq);
+    i = IO_APIC_VECTOR(desc->irq);
 
     v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 
@@ -1768,19 +1765,19 @@
     if ( directed_eoi_enabled )
         return;
 
-    if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
-       !io_apic_level_ack_pending(irq))
+    if ((desc->status & IRQ_MOVE_PENDING) &&
+       !io_apic_level_ack_pending(desc->irq))
         move_masked_irq(desc);
 
     if ( !(v & (1 << (i & 0x1f))) ) {
         spin_lock(&ioapic_lock);
-        __edge_IO_APIC_irq(irq);
-        __level_IO_APIC_irq(irq);
+        __edge_IO_APIC_irq(desc->irq);
+        __level_IO_APIC_irq(desc->irq);
         spin_unlock(&ioapic_lock);
     }
 }
 
-static void end_level_ioapic_irq (unsigned int irq, u8 vector)
+static void end_level_ioapic_irq(struct irq_desc *desc, u8 vector)
 {
     unsigned long v;
     int i;
@@ -1789,23 +1786,21 @@
     {
         if ( directed_eoi_enabled )
         {
-            struct irq_desc *desc = irq_to_desc(irq);
-
             if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
             {
-                eoi_IO_APIC_irq(irq);
+                eoi_IO_APIC_irq(desc->irq);
                 return;
             }
 
-            mask_IO_APIC_irq(irq);
-            eoi_IO_APIC_irq(irq);
+            mask_IO_APIC_irq(desc);
+            eoi_IO_APIC_irq(desc->irq);
             if ( (desc->status & IRQ_MOVE_PENDING) &&
-                 !io_apic_level_ack_pending(irq) )
+                 !io_apic_level_ack_pending(desc->irq) )
                 move_masked_irq(desc);
         }
 
-        if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-            unmask_IO_APIC_irq(irq);
+        if ( !(desc->status & IRQ_DISABLED) )
+            unmask_IO_APIC_irq(desc);
 
         return;
     }
@@ -1829,7 +1824,7 @@
  * operation to prevent an edge-triggered interrupt escaping meanwhile.
  * The idea is from Manfred Spraul.  --macro
  */
-    i = IO_APIC_VECTOR(irq);
+    i = IO_APIC_VECTOR(desc->irq);
 
     /* Manually EOI the old vector if we are moving to the new */
     if ( vector && i != vector )
@@ -1843,30 +1838,21 @@
 
     ack_APIC_irq();
 
-    if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
-            !io_apic_level_ack_pending(irq))
-        move_native_irq(irq);
+    if ( (desc->status & IRQ_MOVE_PENDING) &&
+         !io_apic_level_ack_pending(desc->irq) )
+        move_native_irq(desc);
 
     if (!(v & (1 << (i & 0x1f)))) {
         spin_lock(&ioapic_lock);
-        __mask_IO_APIC_irq(irq);
-        __edge_IO_APIC_irq(irq);
-        __level_IO_APIC_irq(irq);
-        if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-            __unmask_IO_APIC_irq(irq);
+        __mask_IO_APIC_irq(desc->irq);
+        __edge_IO_APIC_irq(desc->irq);
+        __level_IO_APIC_irq(desc->irq);
+        if ( !(desc->status & IRQ_DISABLED) )
+            __unmask_IO_APIC_irq(desc->irq);
         spin_unlock(&ioapic_lock);
     }
 }
 
-static void disable_edge_ioapic_irq(unsigned int irq)
-{
-}
-
-static void end_edge_ioapic_irq(unsigned int irq, u8 vector)
-{
-}
-
-
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
  * so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -1878,11 +1864,10 @@
 static hw_irq_controller ioapic_edge_type = {
     .typename  = "IO-APIC-edge",
     .startup   = startup_edge_ioapic_irq,
-    .shutdown  = disable_edge_ioapic_irq,
+    .shutdown  = irq_shutdown_none,
     .enable    = unmask_IO_APIC_irq,
-    .disable   = disable_edge_ioapic_irq,
+    .disable   = irq_disable_none,
     .ack               = ack_edge_ioapic_irq,
-    .end               = end_edge_ioapic_irq,
     .set_affinity      = set_ioapic_affinity_irq,
 };
 
@@ -1897,26 +1882,24 @@
     .set_affinity      = set_ioapic_affinity_irq,
 };
 
-static unsigned int startup_msi_irq(unsigned int irq)
+static unsigned int startup_msi_irq(struct irq_desc *desc)
 {
-    unmask_msi_irq(irq);
+    unmask_msi_irq(desc);
     return 0;
 }
 
-static void ack_msi_irq(unsigned int irq)
+static void ack_msi_irq(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-
     irq_complete_move(desc);
-    move_native_irq(irq);
+    move_native_irq(desc);
 
     if ( msi_maskable_irq(desc->msi_desc) )
         ack_APIC_irq(); /* ACKTYPE_NONE */
 }
 
-static void end_msi_irq(unsigned int irq, u8 vector)
+static void end_msi_irq(struct irq_desc *desc, u8 vector)
 {
-    if ( !msi_maskable_irq(irq_desc[irq].msi_desc) )
+    if ( !msi_maskable_irq(desc->msi_desc) )
         ack_APIC_irq(); /* ACKTYPE_EOI */
 }
 
@@ -1946,7 +1929,7 @@
             make_8259A_irq(irq);
 }
 
-static void enable_lapic_irq(unsigned int irq)
+static void enable_lapic_irq(struct irq_desc *desc)
 {
     unsigned long v;
 
@@ -1954,7 +1937,7 @@
     apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
 }
 
-static void disable_lapic_irq(unsigned int irq)
+static void disable_lapic_irq(struct irq_desc *desc)
 {
     unsigned long v;
 
@@ -1962,13 +1945,11 @@
     apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
 }
 
-static void ack_lapic_irq(unsigned int irq)
+static void ack_lapic_irq(struct irq_desc *desc)
 {
     ack_APIC_irq();
 }
 
-#define end_lapic_irq end_edge_ioapic_irq
-
 static hw_irq_controller lapic_irq_type = {
     .typename  = "local-APIC-edge",
     .startup   = NULL, /* startup_irq() not used for IRQ0 */
@@ -1976,7 +1957,6 @@
     .enable    = enable_lapic_irq,
     .disable   = disable_lapic_irq,
     .ack               = ack_lapic_irq,
-    .end               = end_lapic_irq,
 };
 
 /*
@@ -2051,7 +2031,7 @@
     /*
      * get/set the timer IRQ vector:
      */
-    disable_8259A_irq(0);
+    disable_8259A_irq(irq_to_desc(0));
     vector = FIRST_HIPRIORITY_VECTOR;
     clear_irq_vector(0);
 
@@ -2071,7 +2051,7 @@
     init_8259A(1);
     /* XEN: Ripped out the legacy missed-tick logic, so below is not needed. */
     /*timer_ack = 1;*/
-    /*enable_8259A_irq(0);*/
+    /*enable_8259A_irq(irq_to_desc(0));*/
 
     pin1  = find_isa_irq_pin(0, mp_INT);
     apic1 = find_isa_irq_apic(0, mp_INT);
@@ -2085,7 +2065,7 @@
         /*
          * Ok, does IRQ0 through the IOAPIC work?
          */
-        unmask_IO_APIC_irq(0);
+        unmask_IO_APIC_irq(irq_to_desc(0));
         if (timer_irq_works()) {
             local_irq_restore(flags);
             return;
@@ -2125,10 +2105,10 @@
 
     printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 
-    disable_8259A_irq(0);
+    disable_8259A_irq(irq_to_desc(0));
     irq_desc[0].handler = &lapic_irq_type;
     apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);      /* Fixed mode */
-    enable_8259A_irq(0);
+    enable_8259A_irq(irq_to_desc(0));
 
     if (timer_irq_works()) {
         local_irq_restore(flags);
@@ -2401,7 +2381,7 @@
     ioapic_register_intr(irq, edge_level);
 
     if (!ioapic && platform_legacy_irq(irq))
-        disable_8259A_irq(irq);
+        disable_8259A_irq(desc);
 
     spin_lock_irqsave(&ioapic_lock, flags);
     __ioapic_write_entry(ioapic, pin, 0, entry);
@@ -2410,7 +2390,7 @@
 
     spin_lock(&desc->lock);
     if (!(desc->status & (IRQ_DISABLED | IRQ_GUEST)))
-        desc->handler->startup(irq);
+        desc->handler->startup(desc);
     spin_unlock_irqrestore(&desc->lock, flags);
 
     return 0;
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/arch/x86/irq.c        Sun Sep 18 00:25:57 2011 +0100
@@ -193,7 +193,7 @@
     spin_lock_irqsave(&desc->lock, flags);
     desc->status  |= IRQ_DISABLED;
     desc->status  &= ~IRQ_GUEST;
-    desc->handler->shutdown(irq);
+    desc->handler->shutdown(desc);
     action = desc->action;
     desc->action  = NULL;
     desc->msi_desc = NULL;
@@ -348,25 +348,20 @@
 
 void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }
 
-static void enable_none(unsigned int vector) { }
-static void end_none(unsigned int irq, u8 vector) { }
-static unsigned int startup_none(unsigned int vector) { return 0; }
-static void disable_none(unsigned int vector) { }
-static void ack_none(unsigned int irq)
+void irq_actor_none(struct irq_desc *desc) { }
+unsigned int irq_startup_none(struct irq_desc *desc) { return 0; }
+static void ack_none(struct irq_desc *desc)
 {
-    ack_bad_irq(irq);
+    ack_bad_irq(desc->irq);
 }
 
-#define shutdown_none   disable_none
-
 hw_irq_controller no_irq_type = {
     "none",
-    startup_none,
-    shutdown_none,
-    enable_none,
-    disable_none,
+    irq_startup_none,
+    irq_shutdown_none,
+    irq_enable_none,
+    irq_disable_none,
     ack_none,
-    end_none
 };
 
 static vmask_t *irq_get_used_vector_mask(int irq)
@@ -586,19 +581,17 @@
     cpus_clear(desc->pending_mask);
 }
 
-void move_native_irq(int irq)
+void move_native_irq(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-
     if (likely(!(desc->status & IRQ_MOVE_PENDING)))
         return;
 
     if (unlikely(desc->status & IRQ_DISABLED))
         return;
 
-    desc->handler->disable(irq);
+    desc->handler->disable(desc);
     move_masked_irq(desc);
-    desc->handler->enable(irq);
+    desc->handler->enable(desc);
 }
 
 /* For re-setting irq interrupt affinity for specific irq */
@@ -655,7 +648,7 @@
     desc = irq_to_desc(irq);
 
     spin_lock(&desc->lock);
-    desc->handler->ack(irq);
+    desc->handler->ack(desc);
 
     if ( likely(desc->status & IRQ_GUEST) )
     {
@@ -665,7 +658,7 @@
             s_time_t now = NOW();
             if ( now < (desc->rl_quantum_start + MILLISECS(10)) )
             {
-                desc->handler->disable(irq);
+                desc->handler->disable(desc);
                 /*
                  * If handler->disable doesn't actually mask the interrupt, a 
                  * disabled irq still can fire. This check also avoids possible 
@@ -717,7 +710,8 @@
     desc->status &= ~IRQ_INPROGRESS;
 
  out:
-    desc->handler->end(irq, regs->entry_vector);
+    if ( desc->handler->end )
+        desc->handler->end(desc, regs->entry_vector);
  out_no_end:
     spin_unlock(&desc->lock);
     irq_exit();
@@ -734,7 +728,7 @@
     list_for_each_entry_safe ( desc, tmp, &irq_ratelimit_list, rl_link )
     {
         spin_lock(&desc->lock);
-        desc->handler->enable(desc->irq);
+        desc->handler->enable(desc);
         list_del(&desc->rl_link);
         INIT_LIST_HEAD(&desc->rl_link);
         spin_unlock(&desc->lock);
@@ -797,7 +791,7 @@
     action = desc->action;
     desc->action  = NULL;
     desc->status |= IRQ_DISABLED;
-    desc->handler->shutdown(irq);
+    desc->handler->shutdown(desc);
     spin_unlock_irqrestore(&desc->lock,flags);
 
     /* Wait to make sure it's not being used on another CPU */
@@ -824,7 +818,7 @@
 
     desc->action  = new;
     desc->status &= ~IRQ_DISABLED;
-    desc->handler->startup(irq);
+    desc->handler->startup(desc);
 
     spin_unlock_irqrestore(&desc->lock,flags);
 
@@ -915,7 +909,8 @@
     switch ( action->ack_type )
     {
     case ACKTYPE_UNMASK:
-        desc->handler->end(irq, 0);
+        if ( desc->handler->end )
+            desc->handler->end(desc, 0);
         break;
     case ACKTYPE_EOI:
         cpu_eoi_map = action->cpu_eoi_map;
@@ -943,7 +938,8 @@
         /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
         ASSERT(action->ack_type == ACKTYPE_EOI);
         ASSERT(desc->status & IRQ_DISABLED);
-        desc->handler->end(irq, vector);
+        if ( desc->handler->end )
+            desc->handler->end(desc, vector);
         return;
     }
 
@@ -1157,7 +1153,8 @@
         ASSERT(irq > 0);
         desc = irq_to_desc(irq);
         spin_lock(&desc->lock);
-        desc->handler->end(irq, peoi[sp].vector);
+        if ( desc->handler->end )
+            desc->handler->end(desc, peoi[sp].vector);
         spin_unlock(&desc->lock);
     }
 
@@ -1235,7 +1232,8 @@
     if ( action->ack_type == ACKTYPE_UNMASK )
     {
         ASSERT(cpus_empty(action->cpu_eoi_map));
-        desc->handler->end(irq, 0);
+        if ( desc->handler->end )
+            desc->handler->end(desc, 0);
         spin_unlock_irq(&desc->lock);
         return;
     }
@@ -1403,7 +1401,7 @@
 
         desc->status |= IRQ_GUEST;
         desc->status &= ~IRQ_DISABLED;
-        desc->handler->startup(irq);
+        desc->handler->startup(desc);
 
         /* Attempt to bind the interrupt target to the correct CPU. */
         cpu_set(v->processor, cpumask);
@@ -1487,8 +1485,9 @@
     {
     case ACKTYPE_UNMASK:
         if ( test_and_clear_bool(pirq->masked) &&
-             (--action->in_flight == 0) )
-            desc->handler->end(irq, 0);
+             (--action->in_flight == 0) &&
+             desc->handler->end )
+                desc->handler->end(desc, 0);
         break;
     case ACKTYPE_EOI:
         /* NB. If #guests == 0 then we clear the eoi_map later on. */
@@ -1517,7 +1516,7 @@
 
     /* Disabling IRQ before releasing the desc_lock avoids an IRQ storm. */
     desc->status |= IRQ_DISABLED;
-    desc->handler->disable(irq);
+    desc->handler->disable(desc);
 
     /*
      * Mark any remaining pending EOIs as ready to flush.
@@ -1539,7 +1538,7 @@
 
     desc->action = NULL;
     desc->status &= ~(IRQ_GUEST|IRQ_INPROGRESS);
-    desc->handler->shutdown(irq);
+    desc->handler->shutdown(desc);
 
     /* Caller frees the old guest descriptor block. */
     return action;
@@ -1959,7 +1958,7 @@
         }
 
         if ( desc->handler->disable )
-            desc->handler->disable(irq);
+            desc->handler->disable(desc);
 
         if ( desc->handler->set_affinity )
             desc->handler->set_affinity(desc, &affinity);
@@ -1967,7 +1966,7 @@
             set_affinity = 0;
 
         if ( desc->handler->enable )
-            desc->handler->enable(irq);
+            desc->handler->enable(desc);
 
         spin_unlock(&desc->lock);
 
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/arch/x86/msi.c
--- a/xen/arch/x86/msi.c        Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/arch/x86/msi.c        Sun Sep 18 00:25:57 2011 +0100
@@ -336,11 +336,11 @@
            || entry->msi_attrib.maskbit;
 }
 
-static void msi_set_mask_bit(unsigned int irq, int flag)
+static void msi_set_mask_bit(struct irq_desc *desc, int flag)
 {
-    struct msi_desc *entry = irq_desc[irq].msi_desc;
+    struct msi_desc *entry = desc->msi_desc;
 
-    ASSERT(spin_is_locked(&irq_desc[irq].lock));
+    ASSERT(spin_is_locked(&desc->lock));
     BUG_ON(!entry || !entry->dev);
     switch (entry->msi_attrib.type) {
     case PCI_CAP_ID_MSI:
@@ -387,14 +387,14 @@
     return -1;
 }
 
-void mask_msi_irq(unsigned int irq)
+void mask_msi_irq(struct irq_desc *desc)
 {
-    msi_set_mask_bit(irq, 1);
+    msi_set_mask_bit(desc, 1);
 }
 
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_irq(struct irq_desc *desc)
 {
-    msi_set_mask_bit(irq, 0);
+    msi_set_mask_bit(desc, 0);
 }
 
 static struct msi_desc* alloc_msi_entry(void)
@@ -978,7 +978,7 @@
 
         write_msi_msg(entry, &entry->msg);
 
-        msi_set_mask_bit(irq, entry->msi_attrib.masked);
+        msi_set_mask_bit(desc, entry->msi_attrib.masked);
 
         if ( entry->msi_attrib.type == PCI_CAP_ID_MSI )
             msi_set_enable(pdev, 1);
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Sun Sep 18 00:25:57 2011 +0100
@@ -29,7 +29,6 @@
 #include <asm-x86/fixmap.h>
 #include <mach_apic.h>
 
-static struct amd_iommu **__read_mostly irq_to_iommu;
 static int __initdata nr_amd_iommus;
 
 unsigned short ivrs_bdf_entries;
@@ -403,10 +402,10 @@
         iommu->msi_cap + PCI_MSI_FLAGS, control);
 }
 
-static void iommu_msi_unmask(unsigned int irq)
+static void iommu_msi_unmask(struct irq_desc *desc)
 {
     unsigned long flags;
-    struct amd_iommu *iommu = irq_to_iommu[irq];
+    struct amd_iommu *iommu = desc->action->dev_id;
 
     /* FIXME: do not support mask bits at the moment */
     if ( iommu->maskbit )
@@ -417,11 +416,10 @@
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void iommu_msi_mask(unsigned int irq)
+static void iommu_msi_mask(struct irq_desc *desc)
 {
     unsigned long flags;
-    struct amd_iommu *iommu = irq_to_iommu[irq];
-    struct irq_desc *desc = irq_to_desc(irq);
+    struct amd_iommu *iommu = desc->action->dev_id;
 
     irq_complete_move(desc);
 
@@ -434,15 +432,15 @@
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static unsigned int iommu_msi_startup(unsigned int irq)
+static unsigned int iommu_msi_startup(struct irq_desc *desc)
 {
-    iommu_msi_unmask(irq);
+    iommu_msi_unmask(desc);
     return 0;
 }
 
-static void iommu_msi_end(unsigned int irq, u8 vector)
+static void iommu_msi_end(struct irq_desc *desc, u8 vector)
 {
-    iommu_msi_unmask(irq);
+    iommu_msi_unmask(desc);
     ack_APIC_irq();
 }
 
@@ -557,13 +555,11 @@
     }
     
     irq_desc[irq].handler = &iommu_msi_type;
-    irq_to_iommu[irq] = iommu;
     ret = request_irq(irq, amd_iommu_page_fault, 0,
                              "amd_iommu", iommu);
     if ( ret )
     {
         irq_desc[irq].handler = &no_irq_type;
-        irq_to_iommu[irq] = NULL;
         destroy_irq(irq);
         AMD_IOMMU_DEBUG("can't request irq\n");
         return 0;
@@ -728,13 +724,6 @@
         ivrs_mappings = NULL;
     }
 
-    /* free irq_to_iommu[] */
-    if ( irq_to_iommu )
-    {
-        xfree(irq_to_iommu);
-        irq_to_iommu = NULL;
-    }
-
     iommu_enabled = 0;
     iommu_passthrough = 0;
     iommu_intremap = 0;
@@ -838,11 +827,6 @@
 
     BUG_ON( !iommu_found() );
 
-    irq_to_iommu = xmalloc_array(struct amd_iommu *, nr_irqs);
-    if ( irq_to_iommu == NULL )
-        goto error_out;
-    memset(irq_to_iommu, 0, nr_irqs * sizeof(struct iommu*));
-
     ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();
 
     if ( !ivrs_bdf_entries )
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Sun Sep 18 00:25:57 2011 +0100
@@ -829,7 +829,6 @@
     }
 }
 
-static struct iommu **irq_to_iommu;
 static int iommu_page_fault_do_one(struct iommu *iommu, int type,
                                    u8 fault_reason, u16 source_id, u64 addr)
 {
@@ -961,9 +960,9 @@
     }
 }
 
-static void dma_msi_unmask(unsigned int irq)
+static void dma_msi_unmask(struct irq_desc *desc)
 {
-    struct iommu *iommu = irq_to_iommu[irq];
+    struct iommu *iommu = desc->action->dev_id;
     unsigned long flags;
 
     /* unmask it */
@@ -972,11 +971,10 @@
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
-static void dma_msi_mask(unsigned int irq)
+static void dma_msi_mask(struct irq_desc *desc)
 {
     unsigned long flags;
-    struct iommu *iommu = irq_to_iommu[irq];
-    struct irq_desc *desc = irq_to_desc(irq);
+    struct iommu *iommu = desc->action->dev_id;
 
     irq_complete_move(desc);
 
@@ -986,15 +984,15 @@
     spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
-static unsigned int dma_msi_startup(unsigned int irq)
+static unsigned int dma_msi_startup(struct irq_desc *desc)
 {
-    dma_msi_unmask(irq);
+    dma_msi_unmask(desc);
     return 0;
 }
 
-static void dma_msi_end(unsigned int irq, u8 vector)
+static void dma_msi_end(struct irq_desc *desc, u8 vector)
 {
-    dma_msi_unmask(irq);
+    dma_msi_unmask(desc);
     ack_APIC_irq();
 }
 
@@ -1071,7 +1069,6 @@
     }
 
     irq_desc[irq].handler = &dma_msi_type;
-    irq_to_iommu[irq] = iommu;
 #ifdef CONFIG_X86
     ret = request_irq(irq, iommu_page_fault, 0, "dmar", iommu);
 #else
@@ -1080,7 +1077,6 @@
     if ( ret )
     {
         irq_desc[irq].handler = &no_irq_type;
-        irq_to_iommu[irq] = NULL;
         destroy_irq(irq);
         dprintk(XENLOG_ERR VTDPREFIX, "IOMMU: can't request irq\n");
         return ret;
@@ -2091,13 +2087,6 @@
 
     platform_quirks_init();
 
-    irq_to_iommu = xmalloc_array(struct iommu*, nr_irqs);
-    BUG_ON(!irq_to_iommu);
-    memset(irq_to_iommu, 0, nr_irqs * sizeof(struct iommu*));
-
-    if(!irq_to_iommu)
-        return -ENOMEM;
-
     /* We enable the following features only if they are supported by all VT-d
      * engines: Snoop Control, DMA passthrough, Queued Invalidation and
      * Interrupt Remapping.
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/include/asm-x86/irq.h Sun Sep 18 00:25:57 2011 +0100
@@ -27,6 +27,8 @@
     DECLARE_BITMAP(_bits,NR_VECTORS);
 } vmask_t;
 
+struct irq_desc;
+
 struct irq_cfg {
         s16 vector;                  /* vector itself is only 8 bits, */
         s16 old_vector;              /* but we use -1 for unassigned  */
@@ -107,8 +109,8 @@
 
 asmlinkage void do_IRQ(struct cpu_user_regs *regs);
 
-void disable_8259A_irq(unsigned int irq);
-void enable_8259A_irq(unsigned int irq);
+void disable_8259A_irq(struct irq_desc *);
+void enable_8259A_irq(struct irq_desc *);
 int i8259A_irq_pending(unsigned int irq);
 void mask_8259A(void);
 void unmask_8259A(void);
@@ -161,7 +163,6 @@
 int create_irq(void);
 void destroy_irq(unsigned int irq);
 
-struct irq_desc;
 extern void irq_complete_move(struct irq_desc *);
 
 extern struct irq_desc *irq_desc;
@@ -171,7 +172,7 @@
 
 void __setup_vector_irq(int cpu);
 
-void move_native_irq(int irq);
+void move_native_irq(struct irq_desc *);
 void move_masked_irq(struct irq_desc *);
 
 int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/include/asm-x86/msi.h
--- a/xen/include/asm-x86/msi.h Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/include/asm-x86/msi.h Sun Sep 18 00:25:57 2011 +0100
@@ -76,8 +76,8 @@
 
 struct msi_desc;
 /* Helper functions */
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
+extern void mask_msi_irq(struct irq_desc *);
+extern void unmask_msi_irq(struct irq_desc *);
 extern void set_msi_affinity(struct irq_desc *, const cpumask_t *);
 extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
 extern void pci_disable_msi(struct msi_desc *desc);
diff -r 5f984c6735c9 -r 2dab09bcec81 xen/include/xen/irq.h
--- a/xen/include/xen/irq.h     Sun Sep 18 00:24:37 2011 +0100
+++ b/xen/include/xen/irq.h     Sun Sep 18 00:25:57 2011 +0100
@@ -41,12 +41,12 @@
  */
 struct hw_interrupt_type {
     const char *typename;
-    unsigned int (*startup)(unsigned int irq);
-    void (*shutdown)(unsigned int irq);
-    void (*enable)(unsigned int irq);
-    void (*disable)(unsigned int irq);
-    void (*ack)(unsigned int irq);
-    void (*end)(unsigned int irq, u8 vector);
+    unsigned int (*startup)(struct irq_desc *);
+    void (*shutdown)(struct irq_desc *);
+    void (*enable)(struct irq_desc *);
+    void (*disable)(struct irq_desc *);
+    void (*ack)(struct irq_desc *);
+    void (*end)(struct irq_desc *, u8 vector);
     void (*set_affinity)(struct irq_desc *, const cpumask_t *);
 };
 
@@ -133,6 +133,11 @@
 
 extern hw_irq_controller no_irq_type;
 extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
+extern unsigned int irq_startup_none(struct irq_desc *);
+extern void irq_actor_none(struct irq_desc *);
+#define irq_shutdown_none irq_actor_none
+#define irq_disable_none irq_actor_none
+#define irq_enable_none irq_actor_none
 
 struct domain;
 struct vcpu;
