[Xen-devel] [PATCH v2 2/2] xen/arm: observe itarget setting in vgic_enable_irqs and vgic_disable_irqs
vgic_enable_irqs should enable irq delivery to the vcpu specified by
GICD_ITARGETSR, rather than the vcpu that wrote to GICD_ISENABLER.
Similarly vgic_disable_irqs should use the target vcpu specified by
itarget to disable irqs.

Correctly initialize itargets for SPIs.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>

---
Changes in v2:
- refactor the common code in get_target_vcpu;
- unify PPI and SPI paths;
- correctly initialize itargets for SPI;
- use byte_read.
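Not part of the patch: a minimal standalone sketch, for review convenience, of
the itargets lookup that the new get_target_vcpu helper below performs. Each
32-bit GICD_ITARGETSR register packs four 8-bit CPU-target bitmasks, one per
interrupt, and the first set bit in an interrupt's byte selects the target
CPU. The helper names and the bare array standing in for the vgic rank state
are illustrative only, not Xen code.

/* Standalone illustration of the ITARGETSR byte layout (not Xen code). */
#include <stdint.h>
#include <stdio.h>

/* 8 registers per rank cover 32 interrupts, 4 interrupts per register. */
static uint32_t itargets_sketch[8];

/* Extract the 8-bit target field of interrupt 'irq' within its rank. */
static uint8_t itarget_byte(unsigned int irq)
{
    uint32_t reg = itargets_sketch[(irq % 32) / 4];
    return (reg >> ((irq % 4) * 8)) & 0xff;
}

/* Return the lowest CPU number whose bit is set, or -1 if the mask is 0. */
static int first_target_cpu(unsigned int irq)
{
    uint8_t mask = itarget_byte(irq);
    for ( int cpu = 0; cpu < 8; cpu++ )
        if ( mask & (1u << cpu) )
            return cpu;
    return -1;
}

int main(void)
{
    /* "Only delivery to CPU0": bit 0 set in each of the four byte fields. */
    for ( int i = 0; i < 8; i++ )
        itargets_sketch[i] = (1 << 0) | (1 << 8) | (1 << 16) | (1 << 24);

    /* Retarget interrupt 45 (byte 1 of register 3 in its rank) to CPU2. */
    itargets_sketch[(45 % 32) / 4] &= ~(0xffu << ((45 % 4) * 8));
    itargets_sketch[(45 % 32) / 4] |= (1u << 2) << ((45 % 4) * 8);

    printf("irq 44 -> CPU%d\n", first_target_cpu(44));  /* prints CPU0 */
    printf("irq 45 -> CPU%d\n", first_target_cpu(45));  /* prints CPU2 */
    return 0;
}

The (1<<0) | (1<<8) | (1<<16) | (1<<24) initializer added by the patch is this
same layout with CPU0's bit set in each of the four byte fields.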
---
 xen/arch/arm/vgic.c |   43 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 8 deletions(-)

diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 1304b5e..55ed27d 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -106,7 +106,15 @@ int domain_vgic_init(struct domain *d)
         INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
     }
     for (i=0; i<DOMAIN_NR_RANKS(d); i++)
+    {
+        int j;
+
         spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
+        /* Only delivery to CPU0 */
+        for ( j = 0 ; j < 8 ; j++ )
+            d->arch.vgic.shared_irqs[i].itargets[j] =
+                (1<<0) | (1<<8) | (1<<16) | (1<<24);
+    }
     return 0;
 }
 
@@ -369,6 +377,21 @@ read_as_zero:
     return 1;
 }
 
+static struct vcpu *get_target_vcpu(struct vcpu *v, unsigned int irq)
+{
+    int target;
+    struct vgic_irq_rank *rank;
+    struct vcpu *v_target;
+
+    rank = vgic_irq_rank(v, 1, irq/32);
+    vgic_lock_rank(v, rank);
+    target = byte_read(rank->itargets[(irq%32)/4], 0, irq % 4);
+    target = find_next_bit((const unsigned long *) &target, 8, 0);
+    v_target = v->domain->vcpu[target];
+    vgic_unlock_rank(v, rank);
+    return v_target;
+}
+
 static void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 {
     const unsigned long mask = r;
@@ -376,12 +399,14 @@ static void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
     unsigned int irq;
     unsigned long flags;
     int i = 0;
+    struct vcpu *v_target;
 
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
-        p = irq_to_pending(v, irq);
+        v_target = get_target_vcpu(v, irq);
+        p = irq_to_pending(v_target, irq);
         clear_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
-        gic_remove_from_queues(v, irq);
+        gic_remove_from_queues(v_target, irq);
         if ( p->desc != NULL )
         {
             spin_lock_irqsave(&p->desc->lock, flags);
@@ -399,24 +424,26 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
     unsigned int irq;
     unsigned long flags;
     int i = 0;
+    struct vcpu *v_target;
 
     while ( (i = find_next_bit(&mask, 32, i)) < 32 ) {
         irq = i + (32 * n);
-        p = irq_to_pending(v, irq);
+        v_target = get_target_vcpu(v, irq);
+        p = irq_to_pending(v_target, irq);
         set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
         /* We need to force the first injection of evtchn_irq because
          * evtchn_upcall_pending is already set by common code on vcpu
          * creation. */
-        if ( irq == v->domain->arch.evtchn_irq &&
+        if ( irq == v_target->domain->arch.evtchn_irq &&
             vcpu_info(current, evtchn_upcall_pending) &&
             list_empty(&p->inflight) )
-            vgic_vcpu_inject_irq(v, irq);
+            vgic_vcpu_inject_irq(v_target, irq);
         else {
             unsigned long flags;
-            spin_lock_irqsave(&v->arch.vgic.lock, flags);
+            spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
             if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
-                gic_raise_guest_irq(v, irq, p->priority);
-            spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+                gic_raise_guest_irq(v_target, irq, p->priority);
+                spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
         }
         if ( p->desc != NULL )
         {
-- 
1.7.10.4

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel