[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] pass struct irq_desc * to set_affinity() IRQ accessors



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1316301877 -3600
# Node ID 5f984c6735c96328f018d87b7e1b2025c3847d70
# Parent  39d450853a3895fb50467997f029c698d5e5edb9
pass struct irq_desc * to set_affinity() IRQ accessors

This is because the descriptor is generally more useful (with the IRQ
number being accessible in it if necessary) and going forward will
hopefully allow removing all direct accesses to the IRQ descriptor
array, in turn making it possible to make this some other, more
efficient data structure.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---


diff -r 39d450853a38 -r 5f984c6735c9 xen/arch/ia64/linux-xen/iosapic.c
--- a/xen/arch/ia64/linux-xen/iosapic.c Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/arch/ia64/linux-xen/iosapic.c Sun Sep 18 00:24:37 2011 +0100
@@ -352,18 +352,18 @@
 
 
 static void
-iosapic_set_affinity (unsigned int irq, const cpumask_t *mask)
+iosapic_set_affinity (struct irq_desc *desc, const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
        unsigned long flags;
        u32 high32, low32;
        int dest, rte_index;
        char __iomem *addr;
-       int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
+       int redir = (desc->irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
+       unsigned int irq = desc->irq & ~IA64_IRQ_REDIRECTED;
        ia64_vector vec;
        struct iosapic_rte_info *rte;
 
-       irq &= (~IA64_IRQ_REDIRECTED);
        vec = irq_to_vector(irq);
 
        if (cpumask_empty(mask))
diff -r 39d450853a38 -r 5f984c6735c9 xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/arch/x86/hpet.c       Sun Sep 18 00:24:37 2011 +0100
@@ -258,24 +258,14 @@
     hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
 }
 
-static void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+static void hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
 {
-    unsigned int ch_idx = irq_to_channel(irq);
-    struct hpet_event_channel *ch = hpet_events + ch_idx;
-
-    BUG_ON(ch_idx >= num_hpets_used);
-
     hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
     hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
 }
 
-static void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+static void hpet_msi_read(struct hpet_event_channel *ch, struct msi_msg *msg)
 {
-    unsigned int ch_idx = irq_to_channel(irq);
-    struct hpet_event_channel *ch = hpet_events + ch_idx;
-
-    BUG_ON(ch_idx >= num_hpets_used);
-
     msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
     msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
     msg->address_hi = 0;
@@ -305,23 +295,22 @@
 {
 }
 
-static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
-    struct irq_desc * desc = irq_to_desc(irq);
     struct irq_cfg *cfg= desc->chip_data;
 
     dest = set_desc_affinity(desc, mask);
     if (dest == BAD_APICID)
         return;
 
-    hpet_msi_read(irq, &msg);
+    hpet_msi_read(desc->action->dev_id, &msg);
     msg.data &= ~MSI_DATA_VECTOR_MASK;
     msg.data |= MSI_DATA_VECTOR(cfg->vector);
     msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
     msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-    hpet_msi_write(irq, &msg);
+    hpet_msi_write(desc->action->dev_id, &msg);
 }
 
 /*
@@ -338,12 +327,12 @@
     .set_affinity   = hpet_msi_set_affinity,
 };
 
-static void __hpet_setup_msi_irq(unsigned int irq)
+static void __hpet_setup_msi_irq(struct irq_desc *desc)
 {
     struct msi_msg msg;
 
-    msi_compose_msg(irq, &msg);
-    hpet_msi_write(irq, &msg);
+    msi_compose_msg(desc->irq, &msg);
+    hpet_msi_write(desc->action->dev_id, &msg);
 }
 
 static int __init hpet_setup_msi_irq(unsigned int irq)
@@ -357,7 +346,7 @@
     if ( ret < 0 )
         return ret;
 
-    __hpet_setup_msi_irq(irq);
+    __hpet_setup_msi_irq(desc);
 
     return 0;
 }
@@ -471,7 +460,7 @@
     if ( ch->cpu != cpu )
         return;
 
-    hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
+    hpet_msi_set_affinity(irq_to_desc(ch->irq), cpumask_of(ch->cpu));
 }
 
 static void hpet_detach_channel(unsigned int cpu,
@@ -493,7 +482,7 @@
     }
 
     ch->cpu = first_cpu(ch->cpumask);
-    hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
+    hpet_msi_set_affinity(irq_to_desc(ch->irq), cpumask_of(ch->cpu));
 }
 
 #include <asm/mc146818rtc.h>
@@ -619,7 +608,7 @@
     for ( i = 0; i < n; i++ )
     {
         if ( hpet_events[i].irq >= 0 )
-            __hpet_setup_msi_irq(hpet_events[i].irq);
+            __hpet_setup_msi_irq(irq_to_desc(hpet_events[i].irq));
 
         /* set HPET Tn as oneshot */
         cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
diff -r 39d450853a38 -r 5f984c6735c9 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/arch/x86/io_apic.c    Sun Sep 18 00:24:37 2011 +0100
@@ -658,7 +658,7 @@
 }
 
 static void
-set_ioapic_affinity_irq_desc(struct irq_desc *desc, const cpumask_t *mask)
+set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask)
 {
     unsigned long flags;
     unsigned int dest;
@@ -695,16 +695,6 @@
     spin_unlock_irqrestore(&ioapic_lock, flags);
 
 }
-
-static void
-set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
-{
-    struct irq_desc *desc;
-
-    desc = irq_to_desc(irq);
-
-    set_ioapic_affinity_irq_desc(desc, mask);
-}
 #endif /* CONFIG_SMP */
 
 /*
@@ -802,7 +792,7 @@
             irq = pin_2_irq(irq_entry, ioapic, pin);
             cfg = irq_cfg(irq);
             BUG_ON(cpus_empty(cfg->cpu_mask));
-            set_ioapic_affinity_irq(irq, &cfg->cpu_mask);
+            set_ioapic_affinity_irq(irq_to_desc(irq), &cfg->cpu_mask);
         }
 
     }
@@ -1780,7 +1770,7 @@
 
     if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
        !io_apic_level_ack_pending(irq))
-        move_masked_irq(irq);
+        move_masked_irq(desc);
 
     if ( !(v & (1 << (i & 0x1f))) ) {
         spin_lock(&ioapic_lock);
@@ -1799,7 +1789,9 @@
     {
         if ( directed_eoi_enabled )
         {
-            if ( !(irq_desc[irq].status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
+            struct irq_desc *desc = irq_to_desc(irq);
+
+            if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
             {
                 eoi_IO_APIC_irq(irq);
                 return;
@@ -1807,9 +1799,9 @@
 
             mask_IO_APIC_irq(irq);
             eoi_IO_APIC_irq(irq);
-            if ( (irq_desc[irq].status & IRQ_MOVE_PENDING) &&
+            if ( (desc->status & IRQ_MOVE_PENDING) &&
                  !io_apic_level_ack_pending(irq) )
-                move_masked_irq(irq);
+                move_masked_irq(desc);
         }
 
         if ( !(irq_desc[irq].status & IRQ_DISABLED) )
diff -r 39d450853a38 -r 5f984c6735c9 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/arch/x86/irq.c        Sun Sep 18 00:24:37 2011 +0100
@@ -558,10 +558,8 @@
     }
 }
 
-void move_masked_irq(int irq)
+void move_masked_irq(struct irq_desc *desc)
 {
-    struct irq_desc *desc = irq_to_desc(irq);
-
     if (likely(!(desc->status & IRQ_MOVE_PENDING)))
         return;
     
@@ -583,7 +581,7 @@
      * For correct operation this depends on the caller masking the irqs.
      */
     if (likely(cpus_intersects(desc->pending_mask, cpu_online_map)))
-        desc->handler->set_affinity(irq, &desc->pending_mask);
+        desc->handler->set_affinity(desc, &desc->pending_mask);
 
     cpus_clear(desc->pending_mask);
 }
@@ -599,7 +597,7 @@
         return;
 
     desc->handler->disable(irq);
-    move_masked_irq(irq);
+    move_masked_irq(desc);
     desc->handler->enable(irq);
 }
 
@@ -1410,7 +1408,7 @@
         /* Attempt to bind the interrupt target to the correct CPU. */
         cpu_set(v->processor, cpumask);
         if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
-            desc->handler->set_affinity(irq, &cpumask);
+            desc->handler->set_affinity(desc, &cpumask);
     }
     else if ( !will_share || !action->shareable )
     {
@@ -1964,7 +1962,7 @@
             desc->handler->disable(irq);
 
         if ( desc->handler->set_affinity )
-            desc->handler->set_affinity(irq, &affinity);
+            desc->handler->set_affinity(desc, &affinity);
         else if ( !(warned++) )
             set_affinity = 0;
 
diff -r 39d450853a38 -r 5f984c6735c9 xen/arch/x86/msi.c
--- a/xen/arch/x86/msi.c        Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/arch/x86/msi.c        Sun Sep 18 00:24:37 2011 +0100
@@ -266,11 +266,10 @@
     }
 }
 
-void set_msi_affinity(unsigned int irq, const cpumask_t *mask)
+void set_msi_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
-    struct irq_desc *desc = irq_to_desc(irq);
     struct msi_desc *msi_desc = desc->msi_desc;
     struct irq_cfg *cfg = desc->chip_data;
 
diff -r 39d450853a38 -r 5f984c6735c9 xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c  Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c  Sun Sep 18 00:24:37 2011 +0100
@@ -344,12 +344,11 @@
     set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
 }
 
-static void iommu_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void iommu_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
-    struct amd_iommu *iommu = irq_to_iommu[irq];
-    struct irq_desc *desc = irq_to_desc(irq);
+    struct amd_iommu *iommu = desc->action->dev_id;
     struct irq_cfg *cfg = desc->chip_data;
     u8 bus = (iommu->bdf >> 8) & 0xff;
     u8 dev = PCI_SLOT(iommu->bdf & 0xff);
@@ -591,7 +590,7 @@
     register_iommu_event_log_in_mmio_space(iommu);
     register_iommu_exclusion_range(iommu);
 
-    iommu_msi_set_affinity(iommu->irq, &cpu_online_map);
+    iommu_msi_set_affinity(irq_to_desc(iommu->irq), &cpu_online_map);
     amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
 
     set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
diff -r 39d450853a38 -r 5f984c6735c9 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Sun Sep 18 00:24:37 2011 +0100
@@ -998,14 +998,12 @@
     ack_APIC_irq();
 }
 
-static void dma_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void dma_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
 {
     struct msi_msg msg;
     unsigned int dest;
     unsigned long flags;
-
-    struct iommu *iommu = irq_to_iommu[irq];
-    struct irq_desc *desc = irq_to_desc(irq);
+    struct iommu *iommu = desc->action->dev_id;
     struct irq_cfg *cfg = desc->chip_data;
 
 #ifdef CONFIG_X86
@@ -1984,7 +1982,7 @@
         iommu = drhd->iommu;
 
         cfg = irq_cfg(iommu->irq);
-        dma_msi_set_affinity(iommu->irq, &cfg->cpu_mask);
+        dma_msi_set_affinity(irq_to_desc(iommu->irq), &cfg->cpu_mask);
 
         clear_fault_bits(iommu);
 
diff -r 39d450853a38 -r 5f984c6735c9 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/include/asm-x86/irq.h Sun Sep 18 00:24:37 2011 +0100
@@ -172,7 +172,7 @@
 void __setup_vector_irq(int cpu);
 
 void move_native_irq(int irq);
-void move_masked_irq(int irq);
+void move_masked_irq(struct irq_desc *);
 
 int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
 
diff -r 39d450853a38 -r 5f984c6735c9 xen/include/asm-x86/msi.h
--- a/xen/include/asm-x86/msi.h Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/include/asm-x86/msi.h Sun Sep 18 00:24:37 2011 +0100
@@ -78,7 +78,7 @@
 /* Helper functions */
 extern void mask_msi_irq(unsigned int irq);
 extern void unmask_msi_irq(unsigned int irq);
-extern void set_msi_affinity(unsigned int vector, const cpumask_t *);
+extern void set_msi_affinity(struct irq_desc *, const cpumask_t *);
 extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
 extern void pci_disable_msi(struct msi_desc *desc);
 extern void pci_cleanup_msi(struct pci_dev *pdev);
diff -r 39d450853a38 -r 5f984c6735c9 xen/include/xen/irq.h
--- a/xen/include/xen/irq.h     Sun Sep 18 00:22:57 2011 +0100
+++ b/xen/include/xen/irq.h     Sun Sep 18 00:24:37 2011 +0100
@@ -33,6 +33,8 @@
 #define NEVER_ASSIGN_IRQ        (-2)
 #define FREE_TO_ASSIGN_IRQ      (-3)
 
+struct irq_desc;
+
 /*
  * Interrupt controller descriptor. This is all we need
  * to describe about the low-level hardware. 
@@ -45,7 +47,7 @@
     void (*disable)(unsigned int irq);
     void (*ack)(unsigned int irq);
     void (*end)(unsigned int irq, u8 vector);
-    void (*set_affinity)(unsigned int irq, const cpumask_t *);
+    void (*set_affinity)(struct irq_desc *, const cpumask_t *);
 };
 
 typedef const struct hw_interrupt_type hw_irq_controller;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.