Re: [Xen-devel] [RFC PATCH 08/10] ARM: vGIC: move target vcpu from irq_rank to struct pending_irq



Hi Andre,

On 04/05/17 16:31, Andre Przywara wrote:
So far we kept the target VCPU for SPIs in the rank structure.
Move that information over into pending_irq.
This changes vgic_get_target_vcpu(), which now takes only a domain
and a struct pending_irq to get the target vCPU, in a way that does not
necessarily require the pending_irq lock to be held.

You don't explain half of the changes made in this patch. Most of them would be better in a separate patch, as they don't necessarily depend on the move from the rank to pending_irq.


Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
---
 xen/arch/arm/gic.c         |  3 +-
 xen/arch/arm/vgic-v2.c     | 57 ++++++++++++++---------------------
 xen/arch/arm/vgic-v3.c     | 71 ++++++++++++++++++++++----------------------
 xen/arch/arm/vgic.c        | 74 ++++++++++++++++++++--------------------------
 xen/include/asm-arm/vgic.h | 15 ++++------
 5 files changed, 97 insertions(+), 123 deletions(-)

diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index e175e9b..737da6b 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -492,7 +492,8 @@ static void gic_update_one_lr(struct vcpu *v, int i)
             smp_wmb();
             if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
             {
-                struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
+                struct vcpu *v_target = vgic_get_target_vcpu(v->domain, p);
+
                 irq_set_affinity(p->desc, cpumask_of(v_target->processor));
                 clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
             }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 22c679c..bf755ae 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -66,19 +66,21 @@ void vgic_v2_setup_hw(paddr_t dbase, paddr_t cbase, paddr_t csize,
  *
  * Note the byte offset will be aligned to an ITARGETSR<n> boundary.
  */
-static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
-                                     unsigned int offset)
+static uint32_t vgic_fetch_itargetsr(struct vcpu *v, unsigned int offset)

Why can't you directly use the macros you introduced in #5?

 {
     uint32_t reg = 0;
     unsigned int i;

-    ASSERT(spin_is_locked(&rank->lock));
-
-    offset &= INTERRUPT_RANK_MASK;
     offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);

     for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++ )
-        reg |= (1 << read_atomic(&rank->vcpu[offset])) << (i * NR_BITS_PER_TARGET);
+    {
+        struct pending_irq *p = irq_to_pending(v, offset);
+
+        spin_lock(&p->lock);
+        reg |= (1 << p->vcpu_id) << (i * NR_BITS_PER_TARGET);
+        spin_unlock(&p->lock);
+    }

     return reg;
 }
@@ -89,32 +91,28 @@ static uint32_t vgic_fetch_itargetsr(struct vgic_irq_rank *rank,
  *
  * Note the byte offset will be aligned to an ITARGETSR<n> boundary.
  */
-static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
+static void vgic_store_itargetsr(struct domain *d,
                                  unsigned int offset, uint32_t itargetsr)

Please re-indent properly.

 {
     unsigned int i;
     unsigned int virq;

-    ASSERT(spin_is_locked(&rank->lock));
-
     /*
      * The ITARGETSR0-7, used for SGIs/PPIs, are implemented RO in the
      * emulation and should never call this function.
      *
-     * They all live in the first rank.
+     * They all live in the first four bytes of ITARGETSR.
      */
-    BUILD_BUG_ON(NR_INTERRUPT_PER_RANK != 32);
-    ASSERT(rank->index >= 1);
+    ASSERT(offset >= 4);

-    offset &= INTERRUPT_RANK_MASK;
+    virq = offset;
     offset &= ~(NR_TARGETS_PER_ITARGETSR - 1);

-    virq = rank->index * NR_INTERRUPT_PER_RANK + offset;
-
     for ( i = 0; i < NR_TARGETS_PER_ITARGETSR; i++, offset++, virq++ )
     {
         unsigned int new_target, old_target;
         uint8_t new_mask;
+        struct pending_irq *p = spi_to_pending(d, virq);

         /*
          * Don't need to mask as we rely on new_mask to fit for only one
@@ -151,16 +149,17 @@ static void vgic_store_itargetsr(struct domain *d, struct vgic_irq_rank *rank,
         /* The vCPU ID always starts from 0 */
         new_target--;

-        old_target = read_atomic(&rank->vcpu[offset]);
+        spin_lock(&p->lock);

I suspect you need to disable interrupts here.
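
Something along these lines, as a sketch; it assumes the pending_irq lock can
also be taken from interrupt context, hence the irqsave variant that the old
code already used for the vgic lock:

    unsigned long flags;

    spin_lock_irqsave(&p->lock, flags);

    old_target = p->vcpu_id;

    /* Only migrate the vIRQ if the target vCPU has changed */
    if ( new_target != old_target )
    {
        if ( vgic_migrate_irq(p, d->vcpu[old_target], d->vcpu[new_target]) )
            p->vcpu_id = new_target;
    }

    spin_unlock_irqrestore(&p->lock, flags);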

+
+        old_target = p->vcpu_id;

         /* Only migrate the vIRQ if the target vCPU has changed */
         if ( new_target != old_target )
         {
-            if ( vgic_migrate_irq(d->vcpu[old_target],
-                             d->vcpu[new_target],
-                             virq) )
-                write_atomic(&rank->vcpu[offset], new_target);
+            if ( vgic_migrate_irq(p, d->vcpu[old_target], d->vcpu[new_target]) )
+                p->vcpu_id = new_target;
         }
+        spin_unlock(&p->lock);
     }
 }

@@ -168,9 +167,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
                                    register_t *r, void *priv)
 {
     struct hsr_dabt dabt = info->dabt;
-    struct vgic_irq_rank *rank;
     int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
-    unsigned long flags;
     unsigned int irq;

     perfc_incr(vgicd_reads);
@@ -259,11 +256,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
         uint32_t itargetsr;

         if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
-        rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD);
-        if ( rank == NULL) goto read_as_zero;
-        vgic_lock_rank(v, rank, flags);
-        itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
-        vgic_unlock_rank(v, rank, flags);
+        itargetsr = vgic_fetch_itargetsr(v, gicd_reg - GICD_ITARGETSR);
         *r = vgic_reg32_extract(itargetsr, info);

         return 1;
@@ -385,10 +378,8 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
                                     register_t r, void *priv)
 {
     struct hsr_dabt dabt = info->dabt;
-    struct vgic_irq_rank *rank;
     int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
     uint32_t tr;
-    unsigned long flags;
     unsigned int irq;

     perfc_incr(vgicd_writes);
@@ -492,14 +483,10 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
         uint32_t itargetsr;

         if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
-        rank = vgic_rank_offset(v, 8, gicd_reg - GICD_ITARGETSR, DABT_WORD);
-        if ( rank == NULL) goto write_ignore;
-        vgic_lock_rank(v, rank, flags);
-        itargetsr = vgic_fetch_itargetsr(rank, gicd_reg - GICD_ITARGETSR);
+        itargetsr = vgic_fetch_itargetsr(v, gicd_reg - GICD_ITARGETSR);
         vgic_reg32_update(&itargetsr, r, info);
-        vgic_store_itargetsr(v->domain, rank, gicd_reg - GICD_ITARGETSR,
+        vgic_store_itargetsr(v->domain, gicd_reg - GICD_ITARGETSR,
                              itargetsr);
-        vgic_unlock_rank(v, rank, flags);
         return 1;
     }

diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 01764c9..15a512a 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -97,18 +97,20 @@ static struct vcpu *vgic_v3_irouter_to_vcpu(struct domain *d, uint64_t irouter)
  *
  * Note the byte offset will be aligned to an IROUTER<n> boundary.
  */
-static uint64_t vgic_fetch_irouter(struct vgic_irq_rank *rank,
-                                   unsigned int offset)
+static uint64_t vgic_fetch_irouter(struct vcpu *v, unsigned int offset)
 {
-    ASSERT(spin_is_locked(&rank->lock));
+    struct pending_irq *p;
+    uint64_t aff;

     /* There is exactly 1 vIRQ per IROUTER */
     offset /= NR_BYTES_PER_IROUTER;

-    /* Get the index in the rank */
-    offset &= INTERRUPT_RANK_MASK;
+    p = irq_to_pending(v, offset);
+    spin_lock(&p->lock);

Same remark as above regarding disabling interrupts.

+    aff = vcpuid_to_vaffinity(p->vcpu_id);
+    spin_unlock(&p->lock);

-    return vcpuid_to_vaffinity(read_atomic(&rank->vcpu[offset]));
+    return aff;
 }

 /*
@@ -117,11 +119,13 @@ static uint64_t vgic_fetch_irouter(struct vgic_irq_rank *rank,
  *
  * Note the offset will be aligned to the appropriate boundary.
  */
-static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
+static void vgic_store_irouter(struct domain *d,
                                unsigned int offset, uint64_t irouter)
 {
     struct vcpu *new_vcpu, *old_vcpu;
+    struct pending_irq *p;
     unsigned int virq;
+    bool reinject;

     /* There is 1 vIRQ per IROUTER */
     virq = offset / NR_BYTES_PER_IROUTER;
@@ -132,11 +136,11 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
      */
     ASSERT(virq >= 32);

-    /* Get the index in the rank */
-    offset &= virq & INTERRUPT_RANK_MASK;
+    p = spi_to_pending(d, virq);
+    spin_lock(&p->lock);

     new_vcpu = vgic_v3_irouter_to_vcpu(d, irouter);
-    old_vcpu = d->vcpu[read_atomic(&rank->vcpu[offset])];
+    old_vcpu = d->vcpu[p->vcpu_id];

     /*
      * From the spec (see 8.9.13 in IHI 0069A), any write with an
@@ -146,16 +150,21 @@ static void vgic_store_irouter(struct domain *d, struct vgic_irq_rank *rank,
      * invalid vCPU. So for now, just ignore the write.
      *
      * TODO: Respect the spec
+     *
+     * Only migrate the IRQ if the target vCPU has changed
      */
-    if ( !new_vcpu )
-        return;
-
-    /* Only migrate the IRQ if the target vCPU has changed */
-    if ( new_vcpu != old_vcpu )
+    if ( !new_vcpu || new_vcpu == old_vcpu )

Why this change?

     {
-        if ( vgic_migrate_irq(old_vcpu, new_vcpu, virq) )
-            write_atomic(&rank->vcpu[offset], new_vcpu->vcpu_id);
+        spin_unlock(&p->lock);
+        return;
     }
+
+    reinject = vgic_migrate_irq(p, old_vcpu, new_vcpu);

This change should really be in a separate patch, along with a justification of why it is necessary.

+
+    spin_unlock(&p->lock);
+
+    if ( reinject )
+        vgic_vcpu_inject_irq(new_vcpu, virq);
 }

 static inline bool vgic_reg64_check_access(struct hsr_dabt dabt)
@@ -869,8 +878,6 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
                                    register_t *r, void *priv)
 {
     struct hsr_dabt dabt = info->dabt;
-    struct vgic_irq_rank *rank;
-    unsigned long flags;
     int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);

     perfc_incr(vgicd_reads);
@@ -996,15 +1003,12 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info,
     case VRANGE64(GICD_IROUTER32, GICD_IROUTER1019):
     {
         uint64_t irouter;
+        unsigned int irq;

s/irq/virq/


         if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
-        rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
-                                DABT_DOUBLE_WORD);
-        if ( rank == NULL ) goto read_as_zero;
-        vgic_lock_rank(v, rank, flags);
-        irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER);
-        vgic_unlock_rank(v, rank, flags);
-
+        irq = (gicd_reg - GICD_IROUTER) / 8;
+        if ( irq >= v->domain->arch.vgic.nr_spis + 32 ) goto read_as_zero;
+        irouter = vgic_fetch_irouter(v, gicd_reg - GICD_IROUTER);
         *r = vgic_reg64_extract(irouter, info);

         return 1;
@@ -1070,8 +1074,6 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
                                     register_t r, void *priv)
 {
     struct hsr_dabt dabt = info->dabt;
-    struct vgic_irq_rank *rank;
-    unsigned long flags;
     int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);

     perfc_incr(vgicd_writes);
@@ -1185,16 +1187,15 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info,
     case VRANGE64(GICD_IROUTER32, GICD_IROUTER1019):
     {
         uint64_t irouter;
+        unsigned int irq;

         if ( !vgic_reg64_check_access(dabt) ) goto bad_width;
-        rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
-                                DABT_DOUBLE_WORD);
-        if ( rank == NULL ) goto write_ignore;
-        vgic_lock_rank(v, rank, flags);
-        irouter = vgic_fetch_irouter(rank, gicd_reg - GICD_IROUTER);
+        irq = (gicd_reg - GICD_IROUTER) / 8;
+        if ( irq >= v->domain->arch.vgic.nr_spis + 32 ) goto write_ignore;
+
+        irouter = vgic_fetch_irouter(v, gicd_reg - GICD_IROUTER);
         vgic_reg64_update(&irouter, r, info);
-        vgic_store_irouter(v->domain, rank, gicd_reg - GICD_IROUTER, irouter);
-        vgic_unlock_rank(v, rank, flags);
+        vgic_store_irouter(v->domain, gicd_reg - GICD_IROUTER, irouter);
         return 1;
     }

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index a23079a..530ac55 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -60,32 +60,22 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
     return vgic_get_rank(v, rank);
 }

-static void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
+static void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq,
+                                  int vcpu_id)
 {
     INIT_LIST_HEAD(&p->inflight);
     INIT_LIST_HEAD(&p->lr_queue);
     spin_lock_init(&p->lock);
     p->irq = virq;
+    p->vcpu_id = vcpu_id;
 }

 static void vgic_rank_init(struct vgic_irq_rank *rank, uint8_t index,
                            unsigned int vcpu)
 {
-    unsigned int i;
-
-    /*
-     * Make sure that the type chosen to store the target is able to
-     * store an VCPU ID between 0 and the maximum of virtual CPUs
-     * supported.
-     */
-    BUILD_BUG_ON((1 << (sizeof(rank->vcpu[0]) * 8)) < MAX_VIRT_CPUS);

You dropped this BUILD_BUG_ON and never re-introduced a similar one to prevent any issue in the future if we decide to extend MAX_VIRT_CPUS.
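
A minimal sketch of an equivalent check against the new field, assuming it
stays a uint8_t named vcpu_id in struct pending_irq:

    BUILD_BUG_ON((1U << (sizeof(((struct pending_irq *)0)->vcpu_id) * 8)) <
                 MAX_VIRT_CPUS);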

-
     spin_lock_init(&rank->lock);

     rank->index = index;
-
-    for ( i = 0; i < NR_INTERRUPT_PER_RANK; i++ )
-        write_atomic(&rank->vcpu[i], vcpu);
 }

 int domain_vgic_register(struct domain *d, int *mmio_count)
@@ -136,8 +126,9 @@ int domain_vgic_init(struct domain *d, unsigned int nr_spis)
     if ( d->arch.vgic.pending_irqs == NULL )
         return -ENOMEM;

+    /* SPIs are routed to VCPU0 by default */
     for (i=0; i<d->arch.vgic.nr_spis; i++)
-        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32);
+        vgic_init_pending_irq(&d->arch.vgic.pending_irqs[i], i + 32, 0);

     /* SPIs are routed to VCPU0 by default */
     for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
@@ -202,8 +193,9 @@ int vcpu_vgic_init(struct vcpu *v)
     v->domain->arch.vgic.handler->vcpu_init(v);

     memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
+    /* SGIs/PPIs are always routed to this VCPU */
     for (i = 0; i < 32; i++)
-        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i);
+        vgic_init_pending_irq(&v->arch.vgic.pending_irqs[i], i, v->vcpu_id);

     INIT_LIST_HEAD(&v->arch.vgic.inflight_irqs);
     INIT_LIST_HEAD(&v->arch.vgic.lr_pending);
@@ -218,11 +210,11 @@ int vcpu_vgic_free(struct vcpu *v)
     return 0;
 }

-struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)
+struct vcpu *vgic_get_target_vcpu(struct domain *d, struct pending_irq *p)

This kind of change would have been better in a separate patch to help the review.

 {
-    struct vgic_irq_rank *rank = vgic_rank_irq(v, virq);
-    int target = read_atomic(&rank->vcpu[virq & INTERRUPT_RANK_MASK]);
-    return v->domain->vcpu[target];
+    uint16_t vcpu_id = read_atomic(&p->vcpu_id);
+
+    return d->vcpu[vcpu_id];

You could have done "return d->vcpu[read_atomic(&p->vcpu_id)];" directly. This could even likely be turned into a static inline function.
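
For instance, a sketch of what such a helper could look like (the placement
in vgic.h is just a suggestion):

    static inline struct vcpu *vgic_get_target_vcpu(struct domain *d,
                                                    struct pending_irq *p)
    {
        return d->vcpu[read_atomic(&p->vcpu_id)];
    }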

 }

 static uint8_t extract_priority(struct pending_irq *p)
@@ -289,31 +281,29 @@ DEFINE_GATHER_IRQ_INFO(enabled, extract_enabled, 1)
 DEFINE_GATHER_IRQ_INFO(config, extract_config, 2)
 DEFINE_SCATTER_IRQ_INFO(config, set_config, 2)

-bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
+bool vgic_migrate_irq(struct pending_irq *p, struct vcpu *old, struct vcpu *new)

Re-ordering the parameters should have been explained. Also, switching to pending_irq could have been done in a separate patch.

Breaking patches down into very small ones really helps the review. I admit this one is quite complex to read because of the mix of re-ordering, introduction of new parameters, and code modifications not really related to this patch...

 {
-    unsigned long flags;
-    struct pending_irq *p = irq_to_pending(old, irq);
-
-    /* nothing to do for virtual interrupts */
-    if ( p->desc == NULL )
-        return true;
+    ASSERT(spin_is_locked(&p->lock));

     /* migration already in progress, no need to do anything */
     if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
     {
-        gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", irq);
+        gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", p->irq);
         return false;
     }

-    perfc_incr(vgic_irq_migrates);
+    p->vcpu_id = new->vcpu_id;

Why did you move this code here?

+
+    /* nothing to do for virtual interrupts */
+    if ( p->desc == NULL )
+        return false;

-    spin_lock_irqsave(&old->arch.vgic.lock, flags);
+    perfc_incr(vgic_irq_migrates);

     if ( list_empty(&p->inflight) )
     {
         irq_set_affinity(p->desc, cpumask_of(new->processor));
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        return true;
+        return false;
     }
     /* If the IRQ is still lr_pending, re-inject it to the new vcpu */
     if ( !list_empty(&p->lr_queue) )
@@ -322,8 +312,6 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
         list_del_init(&p->lr_queue);
         list_del_init(&p->inflight);
         irq_set_affinity(p->desc, cpumask_of(new->processor));
-        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-        vgic_vcpu_inject_irq(new, irq);
         return true;
     }
     /* if the IRQ is in a GICH_LR register, set GIC_IRQ_GUEST_MIGRATING
@@ -331,8 +319,7 @@ bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     if ( !list_empty(&p->inflight) )
         set_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);

-    spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
-    return true;
+    return false;
 }

 void arch_move_irqs(struct vcpu *v)
@@ -345,11 +332,13 @@ void arch_move_irqs(struct vcpu *v)

     for ( i = 32; i < vgic_num_irqs(d); i++ )
     {
-        v_target = vgic_get_target_vcpu(v, i);
-        p = irq_to_pending(v_target, i);
+        p = irq_to_pending(d->vcpu[0], i);

Why d->vcpu[0]?

+        spin_lock(&p->lock);
+        v_target = vgic_get_target_vcpu(d, p);

         if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
             irq_set_affinity(p->desc, cpu_mask);
+        spin_unlock(&p->lock);
     }
 }

@@ -362,8 +351,8 @@ void vgic_disable_irqs(struct vcpu *v, unsigned int irq, uint32_t r)
     struct vcpu *v_target;

     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
-        v_target = vgic_get_target_vcpu(v, irq + i);
-        p = irq_to_pending(v_target, irq + i);
+        p = irq_to_pending(v, irq + i);
+        v_target = vgic_get_target_vcpu(v->domain, p);

This kind of re-ordering would have been nice to explain in the commit message.

         clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
         gic_remove_from_queues(v_target, irq + i);
         if ( p->desc != NULL )
@@ -387,10 +376,10 @@ void vgic_enable_irqs(struct vcpu *v, unsigned int irq, uint32_t r)
     struct domain *d = v->domain;

     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
-        v_target = vgic_get_target_vcpu(v, irq + i);
+        p = irq_to_pending(v, irq + i);
+        v_target = vgic_get_target_vcpu(d, p);

         spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
-        p = irq_to_pending(v_target, irq + i);
         spin_lock(&p->lock);

         set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
@@ -561,11 +550,12 @@ out:
 void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq)
 {
     struct vcpu *v;
+    struct pending_irq *p = irq_to_pending(d->vcpu[0], virq);

     /* the IRQ needs to be an SPI */
     ASSERT(virq >= 32 && virq <= vgic_num_irqs(d));

-    v = vgic_get_target_vcpu(d->vcpu[0], virq);
+    v = vgic_get_target_vcpu(d, p);
     vgic_vcpu_inject_irq(v, virq);
 }

diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index fe09fb8..186e6df 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -76,6 +76,7 @@ struct pending_irq
     uint8_t lr;
     uint8_t priority;           /* the priority of the currently inflight IRQ */
     uint8_t new_priority;       /* the priority of newly triggered IRQs */
+    uint8_t vcpu_id;

So this is pointing to the new vCPU and not the current one. This should be clearly identified in the name and in a comment.
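
Something along these lines, as a sketch (target_vcpu_id is only a suggested
name):

    /*
     * ID of the vCPU this vIRQ is currently targeting (i.e. the vCPU it
     * will be injected to). Writes are protected by the pending_irq
     * lock; lockless readers should use read_atomic().
     */
    uint8_t target_vcpu_id;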

     /* inflight is used to append instances of pending_irq to
      * vgic.inflight_irqs */
     struct list_head inflight;
@@ -103,14 +104,6 @@ struct vgic_irq_rank {
     spinlock_t lock; /* Covers access to all other members of this struct */

     uint8_t index;
-
-    /*
-     * It's more convenient to store a target VCPU per vIRQ
-     * than the register ITARGETSR/IROUTER itself.
-     * Use atomic operations to read/write the vcpu fields to avoid
-     * taking the rank lock.
-     */
-    uint8_t vcpu[32];
 };

 struct sgi_target {
@@ -301,7 +294,8 @@ enum gic_sgi_mode;
 extern int domain_vgic_init(struct domain *d, unsigned int nr_spis);
 extern void domain_vgic_free(struct domain *d);
 extern int vcpu_vgic_init(struct vcpu *v);
-extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
+extern struct vcpu *vgic_get_target_vcpu(struct domain *d,
+                                         struct pending_irq *p);
 extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
 extern void vgic_vcpu_inject_spi(struct domain *d, unsigned int virq);
 extern void vgic_clear_pending_irqs(struct vcpu *v);
@@ -321,7 +315,8 @@ extern int vcpu_vgic_free(struct vcpu *v);
 extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
                         enum gic_sgi_mode irqmode, int virq,
                         const struct sgi_target *target);
-extern bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq);
+extern bool vgic_migrate_irq(struct pending_irq *p,
+                             struct vcpu *old, struct vcpu *new);

 /* Reserve a specific guest vIRQ */
 extern bool vgic_reserve_virq(struct domain *d, unsigned int virq);


Cheers,

--
Julien Grall
