[Xen-changelog] [xen master] ARM: vGIC: move irq_to_pending() calls under the VGIC VCPU lock
commit 5f66da659060563df8481a86c017f07455095045
Author:     Andre Przywara <andre.przywara@xxxxxxx>
AuthorDate: Mon Apr 10 19:05:16 2017 +0100
Commit:     Stefano Stabellini <sstabellini@xxxxxxxxxx>
CommitDate: Wed Jun 14 11:38:37 2017 -0700

    ARM: vGIC: move irq_to_pending() calls under the VGIC VCPU lock

    So far irq_to_pending() is just a convenience function to look up
    statically allocated arrays. This will change with LPIs, which are
    more dynamic, so the memory for their struct pending_irq might go
    away. The proper answer to the issue of preventing stale pointers is
    ref-counting, which requires more rework and will be introduced with
    a later patch.
    For now move the irq_to_pending() calls that are used with LPIs under
    the VGIC VCPU lock, and only use the returned pointer while holding
    the lock. This prevents the memory from being freed while we use it.
    For the sake of completeness we take care of all irq_to_pending()
    users, even those which will never deal with LPIs.
    Document the limits of vgic_num_irqs().

    Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
    Acked-by: Julien Grall <julien.grall@xxxxxxx>
    Acked-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
 xen/arch/arm/vgic.c        | 42 ++++++++++++++++++++++++++++++++----------
 xen/include/asm-arm/vgic.h |  5 +++++
 2 files changed, 37 insertions(+), 10 deletions(-)
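The recurring pattern in the hunks below is to take the per-VCPU vgic lock
*before* calling irq_to_pending(), and to stop using the returned pointer
once the lock is dropped. The following minimal sketch of that rule is not
Xen code: it uses pthread mutexes in place of Xen's spinlocks, and an
invented table/lookup()/enable_irq() in place of irq_to_pending() and its
callers.

    /* Illustrative sketch only -- build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pending { unsigned int irq; bool enabled; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct pending *table[64];   /* entries may be freed at any time */

    /* Must be called with table_lock held -- the rule this patch adopts
     * for irq_to_pending() on paths that can see LPIs. */
    static struct pending *lookup(unsigned int irq)
    {
        return (irq < 64) ? table[irq] : NULL;
    }

    static void enable_irq(unsigned int irq)
    {
        pthread_mutex_lock(&table_lock);    /* lock *before* the lookup */
        struct pending *p = lookup(irq);
        if ( p )
            p->enabled = true;              /* safe: memory cannot vanish here */
        pthread_mutex_unlock(&table_lock);  /* p must not be used past this */
    }

    int main(void)
    {
        table[5] = calloc(1, sizeof(struct pending));
        enable_irq(5);
        printf("irq 5 enabled: %d\n", table[5]->enabled);
        free(table[5]);
        return 0;
    }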
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 04d821a..f2f423f 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -234,23 +234,29 @@ static int vgic_get_virq_priority(struct vcpu *v, unsigned int virq)
 bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
 {
     unsigned long flags;
-    struct pending_irq *p = irq_to_pending(old, irq);
+    struct pending_irq *p;
+
+    spin_lock_irqsave(&old->arch.vgic.lock, flags);
+
+    p = irq_to_pending(old, irq);
 
     /* nothing to do for virtual interrupts */
     if ( p->desc == NULL )
+    {
+        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         return true;
+    }
 
     /* migration already in progress, no need to do anything */
     if ( test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
     {
         gprintk(XENLOG_WARNING, "irq %u migration failed: requested while in progress\n", irq);
+        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         return false;
     }
 
     perfc_incr(vgic_irq_migrates);
 
-    spin_lock_irqsave(&old->arch.vgic.lock, flags);
-
     if ( list_empty(&p->inflight) )
     {
         irq_set_affinity(p->desc, cpumask_of(new->processor));
@@ -285,6 +291,17 @@ void arch_move_irqs(struct vcpu *v)
     struct vcpu *v_target;
     int i;
 
+    /*
+     * We don't migrate LPIs at the moment.
+     * If we ever do, we must make sure that the struct pending_irq does
+     * not go away, as there is no lock preventing this here.
+     * To ensure this, we check if the loop below ever touches LPIs.
+     * At the moment vgic_num_irqs() just covers SPIs, as it's mostly used
+     * for allocating the pending_irq and irq_desc array, in which LPIs
+     * don't participate.
+     */
+    ASSERT(!is_lpi(vgic_num_irqs(d) - 1));
+
     for ( i = 32; i < vgic_num_irqs(d); i++ )
     {
         v_target = vgic_get_target_vcpu(v, i);
@@ -299,6 +316,7 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 {
     const unsigned long mask = r;
     struct pending_irq *p;
+    struct irq_desc *desc;
     unsigned int irq;
     unsigned long flags;
     int i = 0;
@@ -307,17 +325,19 @@ void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
         v_target = vgic_get_target_vcpu(v, irq);
+
+        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
         p = irq_to_pending(v_target, irq);
         clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
-        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
         gic_remove_from_lr_pending(v_target, p);
+        desc = p->desc;
         spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
-        if ( p->desc != NULL )
+
+        if ( desc != NULL )
         {
-            spin_lock_irqsave(&p->desc->lock, flags);
-            p->desc->handler->disable(p->desc);
-            spin_unlock_irqrestore(&p->desc->lock, flags);
+            spin_lock_irqsave(&desc->lock, flags);
+            desc->handler->disable(desc);
+            spin_unlock_irqrestore(&desc->lock, flags);
         }
         i++;
     }
@@ -352,9 +372,9 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
         v_target = vgic_get_target_vcpu(v, irq);
+        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
         p = irq_to_pending(v_target, irq);
         set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
-        spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
         if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
             gic_raise_guest_irq(v_target, irq, p->priority);
         spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
@@ -463,7 +483,7 @@ void vgic_clear_pending_irqs(struct vcpu *v)
 void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
 {
     uint8_t priority;
-    struct pending_irq *iter, *n = irq_to_pending(v, virq);
+    struct pending_irq *iter, *n;
     unsigned long flags;
     bool running;
 
@@ -471,6 +491,8 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq)
 
     spin_lock_irqsave(&v->arch.vgic.lock, flags);
 
+    n = irq_to_pending(v, virq);
+
     /* vcpu offline */
     if ( test_bit(_VPF_down, &v->pause_flags) )
     {
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index df75064..3af7a24 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -289,6 +289,11 @@ enum gic_sgi_mode;
  */
 #define REG_RANK_INDEX(b, n, s) ((((n) >> s) & ((b)-1)) % 32)
 
+/*
+ * At the moment vgic_num_irqs() just covers SPIs and the private IRQs,
+ * as it's mostly used for allocating the pending_irq and irq_desc array,
+ * in which LPIs don't participate.
+ */
 #define vgic_num_irqs(d)       ((d)->arch.vgic.nr_spis + 32)
 
 extern int domain_vgic_init(struct domain *d, unsigned int nr_spis);
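One subtlety in the vgic_disable_irqs() hunk above: desc->handler->disable()
must run under the irq_desc lock rather than the vgic lock, so p->desc is
snapshotted into a local variable while the vgic lock still protects p, and
only the snapshot is dereferenced after the unlock. A sketch of the same
idiom follows; again this is not Xen code -- pthread mutexes and the
desc/pending/disable_one names are invented stand-ins.

    /* Illustrative sketch only -- build with: cc -pthread sketch2.c */
    #include <pthread.h>
    #include <stdio.h>

    struct desc {
        pthread_mutex_t lock;
        void (*disable)(struct desc *);
    };

    struct pending {
        struct desc *desc;   /* NULL for purely virtual interrupts */
    };

    static pthread_mutex_t vgic_lock = PTHREAD_MUTEX_INITIALIZER;

    void disable_one(struct pending *p)
    {
        struct desc *d;

        pthread_mutex_lock(&vgic_lock);
        d = p->desc;                       /* snapshot while p is known alive */
        pthread_mutex_unlock(&vgic_lock);  /* p must not be touched past here */

        if ( d != NULL )
        {
            pthread_mutex_lock(&d->lock);  /* a different lock, taken on its own */
            d->disable(d);
            pthread_mutex_unlock(&d->lock);
        }
    }

    static void say_disabled(struct desc *d)
    {
        (void)d;
        printf("disabled\n");
    }

    int main(void)
    {
        struct desc d = { PTHREAD_MUTEX_INITIALIZER, say_disabled };
        struct pending p = { &d };
        disable_one(&p);
        return 0;
    }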
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog