
[Xen-devel] [PATCH v3] xen/gic: EOI irqs on the right pcpu



We need to write the irq number to GICC_DIR on the physical cpu that
previously received the interrupt, but currently we are doing it on the
pcpu that received the maintenance interrupt. As a consequence, if a
vcpu is migrated to a different pcpu, the irq is EOI'ed on the wrong
pcpu.
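
Concretely, the failing sequence looks like this (a sketch; the pcpu
numbers are illustrative):

  pcpu0:  takes the physical irq and injects the corresponding virq
          into the guest
  ....    dom0 vcpu0 is migrated from pcpu0 to pcpu1 ....
  pcpu1:  the guest EOIs the virq, the maintenance interrupt fires on
          pcpu1, which writes the irq number to its own GICC_DIR
  result: the deactivation reaches pcpu1's cpu interface instead of
          pcpu0's, where the irq was originally taken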

This covers, for example, the case where dom0 vcpu0 is running on pcpu1
(you can test this scenario using xl vcpu-pin).
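
For instance, something along these lines reproduces it (the
domid/vcpu/pcpu numbers are illustrative):

  xl vcpu-pin 0 0 1    # pin dom0 vcpu0 to pcpu1

Any irq that is then received on pcpu0 and handled by dom0 gets
EOI'ed on pcpu1 by the unpatched code.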


Changes in v3:
- use an int rather than a cpumask to store the EOI cpu;
- add the eoi cpu to arch_irq_desc.

Changes in v2:
- pass virq by value to gic_irq_eoi;
- EOI the interrupt without any spin locks held and with interrupts
  enabled.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
CC: keir@xxxxxxx
---
 xen/arch/arm/gic.c        |   23 ++++++++++++++++++++++-
 xen/arch/arm/irq.c        |    1 +
 xen/include/asm-arm/irq.h |    1 +
 3 files changed, 24 insertions(+), 1 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 61de230..feb7b29 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -723,6 +723,12 @@ int gicv_setup(struct domain *d)
                         gic.vbase);
 }
 
+static void gic_irq_eoi(void *info)
+{
+    int virq = (uintptr_t) info;
+    GICC[GICC_DIR] = virq;
+}
+
 static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
 {
     int i = 0, virq;
@@ -733,6 +739,10 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
     while ((i = find_next_bit((const long unsigned int *) &eisr,
                               64, i)) < 64) {
         struct pending_irq *p;
+        int cpu, eoi;
+
+        cpu = -1;
+        eoi = 0;
 
         spin_lock_irq(&gic.lock);
         lr = GICH[GICH_LR + i];
@@ -754,11 +764,22 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
         p = irq_to_pending(v, virq);
         if ( p->desc != NULL ) {
             p->desc->status &= ~IRQ_INPROGRESS;
-            GICC[GICC_DIR] = virq;
+            /* Assume only one pcpu needs to EOI the irq */
+            cpu = p->desc->arch.eoi_cpu;
+            eoi = 1;
         }
         list_del_init(&p->inflight);
         spin_unlock_irq(&v->arch.vgic.lock);
 
+        if ( eoi ) {
+            /* This is not racy because we can't receive another irq of the
+             * same type until we EOI it. */
+            if ( cpu == smp_processor_id() )
+                gic_irq_eoi((void *)(uintptr_t)virq);
+            else
+                on_selected_cpus(cpumask_of(cpu), gic_irq_eoi, (void *)(uintptr_t)virq, 0);
+        }
+
         i++;
     }
 }
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 8c96a0a..b2e486f 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -156,6 +156,7 @@ void do_IRQ(struct cpu_user_regs *regs, unsigned int irq, int is_fiq)
         desc->handler->end(desc);
 
         desc->status |= IRQ_INPROGRESS;
+        desc->arch.eoi_cpu = smp_processor_id();
 
         /* XXX: inject irq into all guest vcpus */
         vgic_vcpu_inject_irq(d->vcpu[0], irq, 0);
diff --git a/xen/include/asm-arm/irq.h b/xen/include/asm-arm/irq.h
index 9fc008c..eeb733a 100644
--- a/xen/include/asm-arm/irq.h
+++ b/xen/include/asm-arm/irq.h
@@ -15,6 +15,7 @@ struct arch_pirq
 
 struct irq_cfg {
 #define arch_irq_desc irq_cfg
+    int eoi_cpu;
 };
 
 #define NR_LOCAL_IRQS  32
-- 
1.7.2.5

