
[Xen-devel] [PATCH v8 07/10] xen/arm: remove workaround to inject evtchn_irq on irq enable



evtchn_upcall_pending is already set by common code at vcpu creation;
therefore, on ARM we also need to call vgic_vcpu_inject_irq for it.
Currently we do that from vgic_enable_irqs as a workaround.

Do this properly by calling vgic_vcpu_inject_irq in the appropriate
places at vcpu creation time, making sure to call it after the vcpu is
up (_VPF_down has been cleared).
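
For clarity, here is a condensed sketch of the ordering this establishes
in arch_set_info_guest (simplified from the hunk below; the comments are
annotations, not part of the patch itself):

    if ( ctxt->flags & VGCF_online )
    {
        /* Mark the vcpu as up first ... */
        clear_bit(_VPF_down, &v->pause_flags);
        /* ... and only then inject: common code has already set
         * evtchn_upcall_pending in the shared vcpu_info, but the vgic
         * still needs an explicit inject of the event channel interrupt
         * for the guest to notice it. */
        vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
    }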

Return an error if arch_set_info_guest is called without VGCF_online
set: at the moment no callers do that, but if they did, we would fail to
inject the first evtchn_irq interrupt.
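
As an illustration, a hypothetical caller would now look like this
(variable names made up; the context is passed via the usual transparent
vcpu_guest_context_u union):

    struct vcpu_guest_context ctxt;

    memset(&ctxt, 0, sizeof(ctxt));
    ctxt.flags = VGCF_online;   /* mandatory: the call is rejected otherwise */
    /* ... fill in ctxt.user_regs etc. ... */
    if ( arch_set_info_guest(v, &ctxt) )
        /* -EINVAL: VGCF_online was not set (or other validation failed) */;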

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

---

Changes in v2:
- coding style fix;
- add comment;
- return an error if arch_set_info_guest is called without VGCF_online.
---
 xen/arch/arm/domain.c       |   11 +++++++++--
 xen/arch/arm/domain_build.c |    3 +++
 xen/arch/arm/vgic.c         |   18 ++++--------------
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 87902ef..4417a90 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -659,6 +659,9 @@ int arch_set_info_guest(
             return -EINVAL;
     }
 #endif
+    /* We do not support calls to this function without VGCF_online set. */
+    if ( !(ctxt->flags & VGCF_online) )
+        return -EINVAL;
 
     vcpu_regs_user_to_hyp(v, regs);
 
@@ -670,9 +673,13 @@ int arch_set_info_guest(
     v->is_initialised = 1;
 
     if ( ctxt->flags & VGCF_online )
+    {
         clear_bit(_VPF_down, &v->pause_flags);
-    else
-        set_bit(_VPF_down, &v->pause_flags);
+        /* evtchn_upcall_pending is set by common code at vcpu creation,
+         * therefore on ARM we also need to call vgic_vcpu_inject_irq
+         * for it. */
+        vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
+    }
 
     return 0;
 }
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 69188a4..ed43a4c 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -18,6 +18,7 @@
 #include <asm/psci.h>
 
 #include <asm/gic.h>
+#include <asm/vgic.h>
 #include <xen/irq.h>
 #include "kernel.h"
 
@@ -1378,6 +1379,8 @@ int construct_dom0(struct domain *d)
     }
 #endif
 
+    vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
+
     for ( i = 1, cpu = 0; i < d->max_vcpus; i++ )
     {
         cpu = cpumask_cycle(cpu, &cpu_online_map);
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 704eaaf..569a859 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -281,20 +281,10 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
         v_target = _vgic_get_target_vcpu(v, irq);
         p = irq_to_pending(v_target, irq);
         set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
-        /* We need to force the first injection of evtchn_irq because
-         * evtchn_upcall_pending is already set by common code on vcpu
-         * creation. */
-        if ( irq == v_target->domain->arch.evtchn_irq &&
-             vcpu_info(current, evtchn_upcall_pending) &&
-             list_empty(&p->inflight) )
-            vgic_vcpu_inject_irq(v_target, irq);
-        else {
-            unsigned long flags;
-            spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
-            if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
-                gic_raise_guest_irq(v_target, irq, p->priority);
-            spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
-        }
+        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
+        if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+            gic_raise_guest_irq(v_target, irq, p->priority);
+        spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
         if ( p->desc != NULL )
         {
             irq_set_affinity(p->desc, cpumask_of(v_target->processor));
-- 
1.7.10.4

