|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [XenARM] [Xen-devel] [PATCH] arm: support fewer LRs register than virtual irqs
On 14/02/12 13:34, Stefano Stabellini wrote:
> On Tue, 14 Feb 2012, Ian Campbell wrote:
>> On Tue, 2012-02-14 at 13:07 +0000, Stefano Stabellini wrote:
>>> If the vgic needs to inject a virtual irq into the guest, but no free
>>> LR registers are available, add the irq to a list and return.
>>> Whenever an LR register becomes available we add the queued irq to it
>>> and remove it from the list.
>>> We use the gic lock to protect the list and the bitmask.
>>
>> There's no need to order the IRQs by priority and ensure that the
>> highest priorities are in the LRs?
>
> You are right, they need to be ordered by priority.
>
> -->8--
>
> From 027ddc0a08c5608797b03e66b87178cd2522ad07 Mon Sep 17 00:00:00 2001
> From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> Date: Tue, 14 Feb 2012 13:23:56 +0000
> Subject: [PATCH] arm: support fewer LR registers than virtual irqs
>
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.
> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.
>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> ---
> xen/arch/arm/gic.c | 58 ++++++++++++++++++++++++++++++++++++++---
> xen/include/asm-arm/domain.h | 1 +
> 2 files changed, 54 insertions(+), 5 deletions(-)
>
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index adc10bb..129b7ff 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -25,6 +25,7 @@
> #include <xen/sched.h>
> #include <xen/errno.h>
> #include <xen/softirq.h>
> +#include <xen/list.h>
> #include <asm/p2m.h>
> #include <asm/domain.h>
>
> @@ -45,6 +46,8 @@ static struct {
> unsigned int lines;
> unsigned int cpus;
> spinlock_t lock;
> + uint64_t lr_mask;
> + struct list_head lr_pending;
> } gic;
>
> irq_desc_t irq_desc[NR_IRQS];
> @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
>
> GICH[GICH_HCR] = GICH_HCR_EN;
> GICH[GICH_MISR] = GICH_MISR_EOI;
> + gic.lr_mask = 0ULL;
> + INIT_LIST_HEAD(&gic.lr_pending);
> }
>
> /* Set up the GIC */
> @@ -345,16 +350,47 @@ int __init setup_irq(unsigned int irq, struct irqaction
> *new)
> return rc;
> }
>
> -void gic_set_guest_irq(unsigned int virtual_irq,
> +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
> unsigned int state, unsigned int priority)
> {
> - BUG_ON(virtual_irq > nr_lrs);
> - GICH[GICH_LR + virtual_irq] = state |
> + BUG_ON(lr > nr_lrs);
> + GICH[GICH_LR + lr] = state |
> GICH_LR_MAINTENANCE_IRQ |
> ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
> ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
> }
>
> +void gic_set_guest_irq(unsigned int virtual_irq,
> + unsigned int state, unsigned int priority)
> +{
> + int i;
> + struct pending_irq *iter, *n;
> +
> + spin_lock(&gic.lock);
> + for (i = 0; i < nr_lrs; i++) {
> + if (!test_and_set_bit(i, &gic.lr_mask))
> + {
> + gic_set_lr(i, virtual_irq, state, priority);
> + spin_unlock(&gic.lock);
> + return;
> + }
> + }
You can skip this loop if gic.lr_pending is non-empty as there won't be
any spare bits in gic.lr_mask.
> + n = irq_to_pending(current, virtual_irq);
> + list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> + {
> + if ( iter->priority < priority )
> + {
> + list_add_tail(&n->lr_link, &iter->lr_link);
> + spin_unlock(&gic.lock);
> + return;
> + }
> + }
How many pending irqs are expected? If it's lots then looping through a
simple list like this might be slow. Something to keep in mind -- I
wouldn't try and fix it now.
> + list_add(&n->lr_link, &gic.lr_pending);
> + spin_unlock(&gic.lock);
> + return;
> +}
> +
> void gic_inject_irq_start(void)
> {
> uint32_t hcr;
> @@ -435,13 +471,26 @@ static void maintenance_interrupt(int irq, void
> *dev_id, struct cpu_user_regs *r
> uint32_t lr;
> uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
>
> - for ( i = 0; i < 64; i++ ) {
> + for ( i = 0; i < nr_lrs; i++ ) {
> if ( eisr & ((uint64_t)1 << i) ) {
> struct pending_irq *p;
>
> + spin_lock(&gic.lock);
> lr = GICH[GICH_LR + i];
> virq = lr & GICH_LR_VIRTUAL_MASK;
> GICH[GICH_LR + i] = 0;
> + clear_bit(i, &gic.lr_mask);
> +
> + if ( !list_empty(gic.lr_pending.next) ) {
> + p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> + gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> + list_del(&p->lr_link);
> + INIT_LIST_HEAD(&p->lr_link);
I don't think you need the INIT_LIST_HEAD() here (and even if you did
you should use list_del_init()). You only need to init nodes if you
need to test if they are in a list or not.
> + set_bit(i, &gic.lr_mask);
> + } else {
> + gic_inject_irq_stop();
> + }
> + spin_unlock(&gic.lock);
>
> spin_lock(&current->arch.vgic.lock);
> p = irq_to_pending(current, virq);
> @@ -449,7 +498,6 @@ static void maintenance_interrupt(int irq, void *dev_id,
> struct cpu_user_regs *r
> p->desc->status &= ~IRQ_INPROGRESS;
> GICC[GICC_DIR] = virq;
> }
> - gic_inject_irq_stop();
> list_del(&p->link);
> INIT_LIST_HEAD(&p->link);
Similarly, here (but this should be fixed up in a separate patch).
> cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 3372d14..75095ff 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -21,6 +21,7 @@ struct pending_irq
> struct irq_desc *desc; /* only set it the irq corresponds to a physical
> irq */
> uint8_t priority;
> struct list_head link;
> + struct list_head lr_link;
> };
>
> struct arch_domain
_______________________________________________
Xen-arm mailing list
Xen-arm@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/mailman/listinfo/xen-arm
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |