[Xen-changelog] [xen master] x86/IO-APIC: fix setup of Xen internally used IRQs (take 2)



commit fc0c3fa2ad5cdb7e9ccb068e3301499acf643bec
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Nov 16 13:11:08 2015 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Nov 16 13:11:08 2015 +0100

    x86/IO-APIC: fix setup of Xen internally used IRQs (take 2)
    
    ..., namely that of a PCI serial card with an IRQ above the
    legacy range. This got broken by the switch to cpumask_any() in
    cpu_mask_to_apicid_phys(). Fix it by allowing all CPUs for such an
    IRQ: setup_vector_irq() now properly updates a booting CPU's
    vector_irq[], avoiding "No irq handler for vector" messages and the
    interrupt not working at all.
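    To illustrate the failure mode, here is a minimal, self-contained
    sketch (illustrative names only, not the actual Xen code) of how the
    per-CPU vector_irq[] table is populated when a secondary CPU boots:
    entries left at INT_MIN are exactly the ones that later produce the
    "No irq handler for vector" message, and the fix amounts to adding
    the booting CPU to the CPU mask of Xen's high-priority (internal)
    IRQs so their entries get filled in.

        #include <limits.h>
        #include <stdio.h>

        /* Stand-ins for NR_VECTORS, FIRST/LAST_HIPRIORITY_VECTOR etc. */
        #define DEMO_NR_VECTORS   16
        #define DEMO_FIRST_HIPRI  12
        #define DEMO_LAST_HIPRI   15

        struct demo_irq {
            unsigned int vector;
            unsigned long cpu_mask;  /* one bit per CPU, stand-in for cpumask_t */
        };

        static void demo_setup_vector_irq(int vector_irq[DEMO_NR_VECTORS],
                                          unsigned int cpu,
                                          struct demo_irq *irqs,
                                          unsigned int nr)
        {
            unsigned int v, i;

            /* Clear vector_irq: nothing is bound on this CPU yet. */
            for ( v = 0; v < DEMO_NR_VECTORS; ++v )
                vector_irq[v] = INT_MIN;

            /* Mark the in-use vectors. */
            for ( i = 0; i < nr; ++i )
            {
                v = irqs[i].vector;
                if ( v >= DEMO_FIRST_HIPRI && v <= DEMO_LAST_HIPRI )
                    irqs[i].cpu_mask |= 1UL << cpu;  /* internal IRQ: allow this CPU */
                else if ( !(irqs[i].cpu_mask & (1UL << cpu)) )
                    continue;                        /* IRQ not routed to this CPU */
                vector_irq[v] = (int)i;
            }
        }

        int main(void)
        {
            struct demo_irq irqs[] = {
                { .vector = 5,  .cpu_mask = 1 },  /* ordinary IRQ, CPU0 only */
                { .vector = 13, .cpu_mask = 1 },  /* high-priority, Xen-internal */
            };
            int table[DEMO_NR_VECTORS];

            demo_setup_vector_irq(table, 1, irqs, 2);  /* CPU1 comes online */
            printf("vector 5 -> %d, vector 13 -> %d\n", table[5], table[13]);
            return 0;
        }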
    
    Clean up coding style and types there at the same time.
    
    While doing this I also noticed that io_apic_set_pci_routing() can't
    be quite right: it sets up the destination _before_ a vector gets
    allocated (which, on systems not using the flat APIC mode, affects
    the possible destinations), and it also doesn't restrict affinity to
    ->arch.cpu_mask (as established by assign_irq_vector()).
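    On the ordering point, here is a minimal sketch (illustrative helper
    names, not the patched function itself) of deriving the destination
    only after the vector, and hence the set of CPUs it can target, is
    known, while never chancing an empty mask:

        #include <stdio.h>

        /* CPU masks reduced to plain bit fields for the sketch. */
        static unsigned long demo_dest_mask(unsigned long target_cpus,
                                            unsigned long vector_cpus)
        {
            /* Don't chance ending up with an empty mask. */
            if ( target_cpus & vector_cpus )
                return target_cpus & vector_cpus;
            return target_cpus;
        }

        int main(void)
        {
            /* TARGET_CPUS 0-3, vector usable on CPUs 0-1: restrict to 0x3. */
            printf("%#lx\n", demo_dest_mask(0xf, 0x3));
            /* No overlap: fall back to the unrestricted TARGET_CPUS. */
            printf("%#lx\n", demo_dest_mask(0xc, 0x3));
            return 0;
        }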
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/io_apic.c    |    8 +++++++-
 xen/arch/x86/irq.c        |   27 +++++++++++++++++++--------
 xen/arch/x86/smpboot.c    |    2 +-
 xen/include/asm-x86/irq.h |    2 +-
 4 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/io_apic.c b/xen/arch/x86/io_apic.c
index f8c8e55..511cd40 100644
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -2201,6 +2201,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 {
     struct irq_desc *desc = irq_to_desc(irq);
     struct IO_APIC_route_entry entry;
+    cpumask_t mask;
     unsigned long flags;
     int vector;
 
@@ -2220,7 +2221,6 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 
     entry.delivery_mode = INT_DELIVERY_MODE;
     entry.dest_mode = INT_DEST_MODE;
-    SET_DEST(entry, logical, cpu_mask_to_apicid(TARGET_CPUS));
     entry.trigger = edge_level;
     entry.polarity = active_high_low;
     entry.mask  = 1;
@@ -2236,6 +2236,12 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
         return vector;
     entry.vector = vector;
 
+    cpumask_copy(&mask, TARGET_CPUS);
+    /* Don't chance ending up with an empty mask. */
+    if (cpumask_intersects(&mask, desc->arch.cpu_mask))
+        cpumask_and(&mask, &mask, desc->arch.cpu_mask);
+    SET_DEST(entry, logical, cpu_mask_to_apicid(&mask));
+
     apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
                "(%d-%d -> %#x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
                mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index f1397d6..5f515a0 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -563,21 +563,26 @@ int assign_irq_vector(int irq, const cpumask_t *mask)
  * Initialize vector_irq on a new cpu. This function must be called
  * with vector_lock held.
  */
-void __setup_vector_irq(int cpu)
+void setup_vector_irq(unsigned int cpu)
 {
-    int irq, vector;
+    unsigned int irq, vector;
 
     /* Clear vector_irq */
-    for (vector = 0; vector < NR_VECTORS; ++vector)
+    for ( vector = 0; vector < NR_VECTORS; ++vector )
         per_cpu(vector_irq, cpu)[vector] = INT_MIN;
     /* Mark the inuse vectors */
-    for (irq = 0; irq < nr_irqs; ++irq) {
+    for ( irq = 0; irq < nr_irqs; ++irq )
+    {
         struct irq_desc *desc = irq_to_desc(irq);
 
-        if (!irq_desc_initialized(desc) ||
-            !cpumask_test_cpu(cpu, desc->arch.cpu_mask))
+        if ( !irq_desc_initialized(desc) )
             continue;
         vector = irq_to_vector(irq);
+        if ( vector >= FIRST_HIPRIORITY_VECTOR &&
+             vector <= LAST_HIPRIORITY_VECTOR )
+            cpumask_set_cpu(cpu, desc->arch.cpu_mask);
+        else if ( !cpumask_test_cpu(cpu, desc->arch.cpu_mask) )
+            continue;
         per_cpu(vector_irq, cpu)[vector] = irq;
     }
 }
@@ -2334,8 +2339,8 @@ void fixup_irqs(void)
 
     for ( irq = 0; irq < nr_irqs; irq++ )
     {
-        int break_affinity = 0;
-        int set_affinity = 1;
+        bool_t break_affinity = 0, set_affinity = 1;
+        unsigned int vector;
         cpumask_t affinity;
 
         if ( irq == 2 )
@@ -2347,6 +2352,12 @@ void fixup_irqs(void)
 
         spin_lock(&desc->lock);
 
+        vector = irq_to_vector(irq);
+        if ( vector >= FIRST_HIPRIORITY_VECTOR &&
+             vector <= LAST_HIPRIORITY_VECTOR )
+            cpumask_and(desc->arch.cpu_mask, desc->arch.cpu_mask,
+                        &cpu_online_map);
+
         cpumask_copy(&affinity, desc->affinity);
         if ( !desc->action || cpumask_subset(&affinity, &cpu_online_map) )
         {
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index dabc929..833a0a5 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -387,7 +387,7 @@ void start_secondary(void *unused)
      * this lock ensures we don't half assign or remove an irq from a cpu.
      */
     lock_vector_lock();
-    __setup_vector_irq(cpu);
+    setup_vector_irq(cpu);
     cpumask_set_cpu(cpu, &cpu_online_map);
     unlock_vector_lock();
 
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index a44305e..fcf37a3 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -167,7 +167,7 @@ extern struct irq_desc *irq_desc;
 void lock_vector_lock(void);
 void unlock_vector_lock(void);
 
-void __setup_vector_irq(int cpu);
+void setup_vector_irq(unsigned int cpu);
 
 void move_native_irq(struct irq_desc *);
 void move_masked_irq(struct irq_desc *);
--
generated by git-patchbot for /home/xen/git/xen.git#master
