
[Xen-devel] [PATCH v4 4/4] xen/arm: physical irq follow virtual irq



Migrate physical irqs to the same physical cpu that is running the vcpu
expected to receive them. This is done when irqs are enabled, when the
guest writes to GICD_ITARGETSR, and when Xen migrates a vcpu to a
different pcpu.
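
The routing itself is a single byte write per interrupt: on a GICv2
distributor GICD_ITARGETSR is byte-accessible, one byte per SPI, with
each bit selecting a CPU interface (RAZ/WI on uniprocessor). A minimal
sketch of that write, reusing the GICD and GICD_ITARGETSR mappings
already present in gic.c (illustration only, not part of the patch):

    /* Route SPI <irq> to the CPU interfaces named in <cpu_if_mask>. */
    static void route_spi(unsigned int irq, unsigned char cpu_if_mask)
    {
        volatile unsigned char *itargets =
            (unsigned char *) (GICD + GICD_ITARGETSR);

        itargets[irq] = cpu_if_mask;
    }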

Introduce a new hook in common code to call the vgic irq migration
code, as evtchn_move_pirqs only deals with event channels at the moment.
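
The hook runs at the top of evtchn_move_pirqs, so every existing caller
that already moves pirq event channels when a vcpu migrates now also
re-routes the guest's physical irqs. A sketch of the resulting shape
(the CONFIG_ARM guard is the one added by the hunk below):

    void evtchn_move_pirqs(struct vcpu *v)
    {
    #ifdef CONFIG_ARM
        /* Re-route this vcpu's physical irqs to v->processor. */
        vgic_move_irqs(v);
    #endif
        /* ... existing pirq event channel walk is unchanged ... */
    }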

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
CC: JBeulich@xxxxxxxx
---
 xen/arch/arm/gic.c         |   18 ++++++++++++++++--
 xen/arch/arm/vgic.c        |   31 +++++++++++++++++++++++++++++++
 xen/common/event_channel.c |    4 ++++
 xen/include/asm-arm/gic.h  |    1 +
 4 files changed, 52 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 6f24b14..43bef21 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -192,9 +192,23 @@ static void gic_guest_irq_end(struct irq_desc *desc)
     /* Deactivation happens in maintenance interrupt / via GICV */
 }
 
-static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
 {
-    BUG();
+    volatile unsigned char *bytereg;
+    unsigned int mask;
+
+    if ( desc == NULL || cpumask_empty(cpu_mask) )
+        return;
+
+    spin_lock(&gic.lock);
+
+    mask = gic_cpu_mask(cpu_mask);
+
+    /* Set target CPU mask (RAZ/WI on uniprocessor) */
+    bytereg = (unsigned char *) (GICD + GICD_ITARGETSR);
+    bytereg[desc->irq] = mask;
+
+    spin_unlock(&gic.lock);
 }
 
 /* XXX different for level vs edge */
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 54d3676..a90d9cb 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -408,6 +408,32 @@ struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq)
     return v_target;
 }
 
+void vgic_move_irqs(struct vcpu *v)
+{
+    const cpumask_t *cpu_mask = cpumask_of(v->processor);
+    struct domain *d = v->domain;
+    struct pending_irq *p;
+    int i, j, k;
+
+    for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
+    {
+        for ( j = 0 ; j < 8 ; j++ )
+        {
+            for ( k = 0; k < 4; k++ )
+            {
+                uint8_t target = byte_read(d->arch.vgic.shared_irqs[i].itargets[j], 0, k);
+                target = find_next_bit((const unsigned long *) &target, 8, 0);
+                if ( target == v->vcpu_id )
+                {
+                    p = irq_to_pending(v, 32 * (i + 1) + (j * 4) + k);
+                    if ( p->desc != NULL )
+                        p->desc->handler->set_affinity(p->desc, cpu_mask);
+                }
+            }
+        }
+    }
+}
+
 static void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 {
     const unsigned long mask = r;
@@ -463,6 +489,7 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
         }
         if ( p->desc != NULL )
         {
+            p->desc->handler->set_affinity(p->desc, cpumask_of(v_target->processor));
             spin_lock_irqsave(&p->desc->lock, flags);
             p->desc->handler->enable(p->desc);
             spin_unlock_irqrestore(&p->desc->lock, flags);
@@ -649,6 +676,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
         {
             unsigned int irq, target, old_target;
             struct vcpu *v_target, *v_old;
+            struct pending_irq *p;
 
             target = i % 8;
 
@@ -657,6 +685,9 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
             old_target = byte_read(rank->itargets[REG_RANK_INDEX(8, gicd_reg - GICD_ITARGETSR)], 0, i/8);
             v_old = v->domain->vcpu[old_target];
             vgic_migrate_irq(v_old, v_target, irq);
+            p = irq_to_pending(v_target, irq);
+            if ( p->desc != NULL )
+                p->desc->handler->set_affinity(p->desc, cpumask_of(v_target->processor));
             i += 8 - target;
         }
         if ( dabt.size == 2 )
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 6853842..226321d 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -1319,6 +1319,10 @@ void evtchn_move_pirqs(struct vcpu *v)
     unsigned int port;
     struct evtchn *chn;
 
+#ifdef CONFIG_ARM
+    vgic_move_irqs(v);
+#endif
+
     spin_lock(&d->event_lock);
     for ( port = v->pirq_evtchn_head; port; port = chn->u.pirq.next_port )
     {
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index bd40628..8f457dd 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -228,6 +228,7 @@ int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
 void gic_clear_lrs(struct vcpu *v);
 
 struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
+void vgic_move_irqs(struct vcpu *v);
 
 #endif /* __ASSEMBLY__ */
 #endif
-- 
1.7.10.4

