
[Xen-devel] [PATCH-4.5 v3 09/12] xen/arm: second irq injection while the first irq is still inflight



Set GICH_LR_PENDING in the corresponding GICH_LR to inject a second irq
while the first one is still active.
If the first irq is already pending (not active), just clear
GIC_IRQ_GUEST_PENDING, because the irq has already been injected and is
already visible to the guest.
If the irq has already been EOI'ed, then clear the GICH_LR right away
and move the interrupt to lr_pending so that it will be reinjected by
gic_restore_pending_irqs on return to the guest.
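
In short, the per-LR handling is (condensed from _gic_clear_lr in the
diff below; desc handling and locking omitted):

    uint32_t lr = GICH[GICH_LR + i];
    struct pending_irq *p = irq_to_pending(v, irq);

    if ( lr & GICH_LR_ACTIVE )
    {
        /* HW interrupts cannot be ACTIVE and PENDING */
        if ( p->desc == NULL &&
             test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
             test_and_clear_bit(GIC_IRQ_GUEST_PENDING, &p->status) )
            GICH[GICH_LR + i] = lr | GICH_LR_PENDING;
    }
    else if ( lr & GICH_LR_PENDING )
        /* already injected and visible to the guest */
        clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
    else
    {
        /* EOI'ed: free the LR and requeue if a new irq is pending */
        GICH[GICH_LR + i] = 0;
        clear_bit(i, &this_cpu(lr_mask));
        clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
        if ( test_bit(GIC_IRQ_GUEST_PENDING, &p->status) &&
             test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) )
            gic_raise_guest_irq(v, irq, p->priority);
        else
            list_del_init(&p->inflight);
    }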

If the target cpu is not the current cpu, set GIC_IRQ_GUEST_PENDING and
send an SGI. The target cpu will be interrupted and will call
gic_clear_lrs, which takes the same actions.
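
For the cross-cpu case the flow is roughly the following (a sketch; the
vcpu_unblock() tail is the pre-existing end of vgic_vcpu_inject_irq and
is assumed unchanged here):

    /* on the injecting pcpu, in vgic_vcpu_inject_irq */
    set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
    spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
    /* kicks the target pcpu with an SGI if v is running remotely;
     * on its way back to the guest the target calls gic_clear_lrs,
     * which takes the same per-LR actions described above */
    vcpu_unblock(v);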

Unify the inflight and non-inflight code paths in vgic_vcpu_inject_irq.

Do not call vgic_vcpu_inject_irq from gic_inject if
evtchn_upcall_pending is set. Once that call is removed, we no longer
need to special-case evtchn_irq in vgic_vcpu_inject_irq.
We do, however, need to force the first injection of evtchn_irq (by
calling vgic_vcpu_inject_irq) from vgic_enable_irqs, because
evtchn_upcall_pending is already set by common code on vcpu creation.
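
Concretely, the forced first injection is the new check added to
vgic_enable_irqs (quoted from the diff below):

    if ( irq == v->domain->arch.evtchn_irq &&
         vcpu_info(current, evtchn_upcall_pending) &&
         list_empty(&p->inflight) )
        vgic_vcpu_inject_irq(v, irq);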

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

---
Changes in v3:
- do not use the PENDING and ACTIVE state for HW interrupts,
- check that p->lr is valid in gic_set_clear_lr.
---
 xen/arch/arm/gic.c  |   89 +++++++++++++++++++++++++++++----------------------
 xen/arch/arm/vgic.c |   33 ++++++++++---------
 2 files changed, 68 insertions(+), 54 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 0e429c8..2dc6386 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -67,6 +67,8 @@ static DEFINE_PER_CPU(u8, gic_cpu_id);
 /* Maximum cpu interface per GIC */
 #define NR_GIC_CPU_IF 8
 
+static void _gic_clear_lr(struct vcpu *v, int i);
+
 static unsigned int gic_cpu_mask(const cpumask_t *cpumask)
 {
     unsigned int cpu;
@@ -677,6 +679,14 @@ void gic_raise_guest_irq(struct vcpu *v, unsigned int irq,
 {
     int i;
     unsigned long flags;
+    struct pending_irq *n = irq_to_pending(v, irq);
+
+    if ( test_bit(GIC_IRQ_GUEST_VISIBLE, &n->status) )
+    {
+        if ( v == current )
+            _gic_clear_lr(v, n->lr);
+        return;
+    }
 
     spin_lock_irqsave(&gic.lock, flags);
 
@@ -697,51 +707,57 @@ out:
     return;
 }
 
-void gic_clear_lrs(struct vcpu *v)
+static void _gic_clear_lr(struct vcpu *v, int i)
 {
-    struct pending_irq *p;
-    int i = 0, irq;
+    int irq;
     uint32_t lr;
-    bool_t inflight;
+    struct pending_irq *p;
+
+    lr = GICH[GICH_LR + i];
+    irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
+    p = irq_to_pending(v, irq);
+    if ( lr & GICH_LR_ACTIVE )
+    {
+        /* HW interrupts cannot be ACTIVE and PENDING */
+        if ( p->desc == NULL &&
+             test_bit(GIC_IRQ_GUEST_ENABLED, &p->status) &&
+             test_and_clear_bit(GIC_IRQ_GUEST_PENDING, &p->status) )
+            GICH[GICH_LR + i] = lr | GICH_LR_PENDING;
+    } else if ( lr & GICH_LR_PENDING ) {
+        clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
+    } else {
+        spin_lock(&gic.lock);
+
+        GICH[GICH_LR + i] = 0;
+        clear_bit(i, &this_cpu(lr_mask));
+
+        if ( p->desc != NULL )
+            p->desc->status &= ~IRQ_INPROGRESS;
+        clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
+        p->lr = nr_lrs;
+        if ( test_bit(GIC_IRQ_GUEST_PENDING, &p->status) &&
+                test_bit(GIC_IRQ_GUEST_ENABLED, &p->status))
+        {
+            gic_raise_guest_irq(v, irq, p->priority);
+        } else
+            list_del_init(&p->inflight);
+
+        spin_unlock(&gic.lock);
+    }
+}
+
+void gic_clear_lrs(struct vcpu *v)
+{
+    int i = 0;
     unsigned long flags;
 
     spin_lock_irqsave(&v->arch.vgic.lock, flags);
-
     while ((i = find_next_bit((const long unsigned int *) &this_cpu(lr_mask),
                               nr_lrs, i)) < nr_lrs) {
-        lr = GICH[GICH_LR + i];
-        if ( !(lr & (GICH_LR_PENDING|GICH_LR_ACTIVE)) )
-        {
-            inflight = 0;
-            GICH[GICH_LR + i] = 0;
-            clear_bit(i, &this_cpu(lr_mask));
-
-            irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
-            spin_lock(&gic.lock);
-            p = irq_to_pending(v, irq);
-            if ( p->desc != NULL )
-                p->desc->status &= ~IRQ_INPROGRESS;
-            clear_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
-            p->lr = nr_lrs;
-            if ( test_bit(GIC_IRQ_GUEST_PENDING, &p->status) &&
-                    test_bit(GIC_IRQ_GUEST_ENABLED, &p->status))
-            {
-                inflight = 1;
-                gic_raise_guest_irq(v, irq, p->priority);
-            }
-            spin_unlock(&gic.lock);
-            if ( !inflight )
-            {
-                spin_lock(&v->arch.vgic.lock);
-                list_del_init(&p->inflight);
-                spin_unlock(&v->arch.vgic.lock);
-            }
-
-        }
 
+        _gic_clear_lr(v, i);
         i++;
     }
-
     spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
 }
 
@@ -785,9 +801,6 @@ int gic_events_need_delivery(void)
 
 void gic_inject(void)
 {
-    if ( vcpu_info(current, evtchn_upcall_pending) )
-        vgic_vcpu_inject_irq(current, current->domain->arch.evtchn_irq);
-
     gic_restore_pending_irqs(current);
 
     if ( !list_empty(&current->arch.vgic.lr_pending) &&
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index b003f29..981db6c 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -387,7 +387,11 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
         irq = i + (32 * n);
         p = irq_to_pending(v, irq);
         set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
-        if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+        if ( irq == v->domain->arch.evtchn_irq &&
+             vcpu_info(current, evtchn_upcall_pending) &&
+             list_empty(&p->inflight) )
+            vgic_vcpu_inject_irq(v, irq);
+        else if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
             gic_raise_guest_irq(v, irq, p->priority);
         if ( p->desc != NULL )
             p->desc->handler->enable(p->desc);
@@ -694,14 +698,6 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
 
     spin_lock_irqsave(&v->arch.vgic.lock, flags);
 
-    if ( !list_empty(&n->inflight) )
-    {
-        if ( (irq != current->domain->arch.evtchn_irq) ||
-             (!test_bit(GIC_IRQ_GUEST_VISIBLE, &n->status)) )
-            set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
-        goto out;
-    }
-
     /* vcpu offline */
     if ( test_bit(_VPF_down, &v->pause_flags) )
     {
@@ -713,21 +709,26 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
 
     n->irq = irq;
     set_bit(GIC_IRQ_GUEST_PENDING, &n->status);
-    n->priority = priority;
 
     /* the irq is enabled */
     if ( test_bit(GIC_IRQ_GUEST_ENABLED, &n->status) )
         gic_raise_guest_irq(v, irq, priority);
 
-    list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
+    if ( list_empty(&n->inflight) )
     {
-        if ( iter->priority > priority )
+        n->priority = priority;
+        list_for_each_entry ( iter, &v->arch.vgic.inflight_irqs, inflight )
         {
-            list_add_tail(&n->inflight, &iter->inflight);
-            goto out;
+            if ( iter->priority > priority )
+            {
+                list_add_tail(&n->inflight, &iter->inflight);
+                goto out;
+            }
         }
-    }
-    list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
+        list_add_tail(&n->inflight, &v->arch.vgic.inflight_irqs);
+    } else if ( n->priority != priority )
+        gdprintk(XENLOG_WARNING, "Changing priority of an inflight interrupt is not supported");
+
 out:
     spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
     /* we have a new higher priority irq, inject it into the guest */
-- 
1.7.10.4

