Re: [Xen-devel] [PATCH V2 25/40] arm: split pending SPIs (global) out from pending PPIs and SGIs (per CPU)
On Tue, 26 Jun 2012, Ian Campbell wrote:
> This tracks SPIs in struct arch_domain and PPIs+SGIs in struct arch_vcpu, which
> seems more logical.
>
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> xen/arch/arm/vgic.c | 12 +++++++-----
> xen/include/asm-arm/domain.h | 10 ++++++++++
> 2 files changed, 17 insertions(+), 5 deletions(-)
>
> diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
> index 629a0da..af3523f 100644
> --- a/xen/arch/arm/vgic.c
> +++ b/xen/arch/arm/vgic.c
> @@ -82,9 +82,8 @@ int domain_vgic_init(struct domain *d)
> d->arch.vgic.shared_irqs =
> xmalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
> d->arch.vgic.pending_irqs =
> - xmalloc_array(struct pending_irq,
> - d->arch.vgic.nr_lines + (32 * d->max_vcpus));
> - for (i=0; i<d->arch.vgic.nr_lines + (32 * d->max_vcpus); i++)
> + xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
> + for (i=0; i<d->arch.vgic.nr_lines; i++)
> INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
> for (i=0; i<DOMAIN_NR_RANKS(d); i++)
> spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);
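
(Aside, not part of the patch: xzalloc_array() returns zeroed memory, roughly equivalent to

    p = xmalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
    if ( p )
        memset(p, 0, d->arch.vgic.nr_lines * sizeof(*p));

so an explicit memset() is only needed for the per-VCPU array that is now embedded in struct arch_vcpu and initialised in vcpu_vgic_init() below.)
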
> @@ -98,6 +97,10 @@ int vcpu_vgic_init(struct vcpu *v)
>
> spin_lock_init(&v->arch.vgic.private_irqs.lock);
>
> + memset(&v->arch.vgic.pending_irqs, 0, sizeof(v->arch.vgic.pending_irqs));
> + for (i = 0; i < 32; i++)
> + INIT_LIST_HEAD(&v->arch.vgic.pending_irqs[i].inflight);
> +
> /* For SGI and PPI the target is always this CPU */
> for ( i = 0 ; i < 8 ; i++ )
> v->arch.vgic.private_irqs.itargets[i] =
> @@ -535,8 +538,7 @@ struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
> /* Pending irqs allocation strategy: the first vgic.nr_lines irqs
> * are used for SPIs; the rests are used for per cpu irqs */
> if ( irq < 32 )
> - n = &v->domain->arch.vgic.pending_irqs[irq + (v->vcpu_id * 32)
> - + v->domain->arch.vgic.nr_lines];
> + n = &v->arch.vgic.pending_irqs[irq];
> else
> n = &v->domain->arch.vgic.pending_irqs[irq - 32];
> return n;
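
For reference, the resulting layout (illustration only, assuming the usual GIC numbering of SGIs 0-15 and PPIs 16-31 per CPU, with SPIs from 32 upwards):

    /* irq < 32: SGIs/PPIs, per VCPU, indexed directly by irq */
    p = &v->arch.vgic.pending_irqs[irq];
    /* irq >= 32: SPIs, domain global, nr_lines entries, SPI 32 in slot 0 */
    p = &v->domain->arch.vgic.pending_irqs[irq - 32];

e.g. a guest with nr_lines = 64 and 4 VCPUs now gets a 64-entry domain array plus a fixed 32-entry array embedded in each struct arch_vcpu, instead of a single 64 + 4*32 = 192 entry allocation indexed as nr_lines + vcpu_id*32 + irq for the per-CPU interrupts.
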
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 620b26e..32deb52 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -46,6 +46,10 @@ struct arch_domain
> int ctlr;
> int nr_lines;
> struct vgic_irq_rank *shared_irqs;
> + /*
> + * SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
> + * struct arch_vcpu.
> + */
> struct pending_irq *pending_irqs;
> } vgic;
>
> @@ -114,7 +118,13 @@ struct arch_vcpu
> uint32_t gic_lr[64];
>
> struct {
> + /*
> + * SGIs and PPIs are per-VCPU, SPIs are domain global and in
> + * struct arch_domain.
> + */
> + struct pending_irq pending_irqs[32];
> struct vgic_irq_rank private_irqs;
> +
> /* This list is ordered by IRQ priority and it is used to keep
> * track of the IRQs that the VGIC injected into the guest.
> * Depending on the availability of LR registers, the IRQs might
> --
> 1.7.9.1
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel