[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [XenARM] [PATCH] arm: support fewer LR registers than virtual irqs



On Tue, Feb 14, 2012 at 1:07 PM, Stefano Stabellini
<stefano.stabellini@xxxxxxxxxxxxx> wrote:
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.
> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.
>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> ---
>  xen/arch/arm/gic.c           |   46 +++++++++++++++++++++++++++++++++++++----
>  xen/include/asm-arm/domain.h |    1 +
>  2 files changed, 42 insertions(+), 5 deletions(-)
>
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index adc10bb..97c223c 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -25,6 +25,7 @@
>  #include <xen/sched.h>
>  #include <xen/errno.h>
>  #include <xen/softirq.h>
> +#include <xen/list.h>
>  #include <asm/p2m.h>
>  #include <asm/domain.h>
>
> @@ -45,6 +46,8 @@ static struct {
>     unsigned int lines;
>     unsigned int cpus;
>     spinlock_t lock;
> +    uint64_t lr_mask;
> +    struct list_head lr_pending;
>  } gic;
>
>  irq_desc_t irq_desc[NR_IRQS];
> @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
>
>     GICH[GICH_HCR] = GICH_HCR_EN;
>     GICH[GICH_MISR] = GICH_MISR_EOI;
> +    gic.lr_mask = 0ULL;
> +    INIT_LIST_HEAD(&gic.lr_pending);
>  }
>
>  /* Set up the GIC */
> @@ -345,16 +350,35 @@ int __init setup_irq(unsigned int irq, struct irqaction 
> *new)
>     return rc;
>  }
>
> -void gic_set_guest_irq(unsigned int virtual_irq,
> +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
>         unsigned int state, unsigned int priority)
>  {
> -    BUG_ON(virtual_irq > nr_lrs);
> -    GICH[GICH_LR + virtual_irq] = state |
> +    BUG_ON(lr > nr_lrs);
> +    GICH[GICH_LR + lr] = state |
>         GICH_LR_MAINTENANCE_IRQ |
>         ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
>         ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
>  }
>
> +void gic_set_guest_irq(unsigned int virtual_irq,
> +        unsigned int state, unsigned int priority)
> +{
> +    int i;
> +
> +    spin_lock(&gic.lock);
> +    for (i = 0; i < nr_lrs; i++) {
> +        if (!test_and_set_bit(i, &gic.lr_mask))
> +        {
> +            gic_set_lr(i, virtual_irq, state, priority);
> +            spin_unlock(&gic.lock);
> +            return;
> +        }
> +    }
> +    list_add_tail(&irq_to_pending(current, virtual_irq)->lr_link, 
> &gic.lr_pending);
> +    spin_unlock(&gic.lock);
> +    return;
> +}
> +
>  void gic_inject_irq_start(void)
>  {
>     uint32_t hcr;
> @@ -435,13 +459,26 @@ static void maintenance_interrupt(int irq, void 
> *dev_id, struct cpu_user_regs *r
>     uint32_t lr;
>     uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
>
> -    for ( i = 0; i < 64; i++ ) {
> +    for ( i = 0; i < nr_lrs; i++ ) {
>         if ( eisr & ((uint64_t)1 << i) ) {
>             struct pending_irq *p;
>
> +            spin_lock(&gic.lock);
>             lr = GICH[GICH_LR + i];
>             virq = lr & GICH_LR_VIRTUAL_MASK;
>             GICH[GICH_LR + i] = 0;
> +            clear_bit(i, &gic.lr_mask);
> +
> +            if ( !list_empty(gic.lr_pending.next) ) {
> +                p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> +                gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> +                list_del(&p->lr_link);
> +                INIT_LIST_HEAD(&p->lr_link);
> +                set_bit(i, &gic.lr_mask);
> +            } else {
> +                gic_inject_irq_stop();
> +            }
> +            spin_unlock(&gic.lock);
>
>             spin_lock(&current->arch.vgic.lock);
>             p = irq_to_pending(current, virq);
> @@ -449,7 +486,6 @@ static void maintenance_interrupt(int irq, void *dev_id, 
> struct cpu_user_regs *r
>                 p->desc->status &= ~IRQ_INPROGRESS;
>                 GICC[GICC_DIR] = virq;
>             }
> -            gic_inject_irq_stop();
>             list_del(&p->link);
>             INIT_LIST_HEAD(&p->link);
>             cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 3372d14..75095ff 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -21,6 +21,7 @@ struct pending_irq
>     struct irq_desc *desc; /* only set it the irq corresponds to a physical 
> irq */
>     uint8_t priority;
>     struct list_head link;
> +    struct list_head lr_link;
>  };
>
>  struct arch_domain
> --
> 1.7.8.3
>

The patch fixes the BUG() call from gic_set_guest_irq(), but now I'm
getting another guest data abort error.
I used the arm-tools-2 branch, where the GIC code looks slightly
different. Which branch should I be using?

Below is my boot log with the error.

__  __            _  _    ____                     _        _     _
 \ \/ /___ _ __   | || |  |___ \    _   _ _ __  ___| |_ __ _| |__ | | ___
  \  // _ \ '_ \  | || |_   __) |__| | | | '_ \/ __| __/ _` | '_ \| |/ _ \
  /  \  __/ | | | |__   _| / __/|__| |_| | | | \__ \ || (_| | |_) | |  __/
 /_/\_\___|_| |_|    |_|(_)_____|   \__,_|_| |_|___/\__\__,_|_.__/|_|\___|


(XEN) Latest ChangeSet: unavailable
(XEN) Using generic timer at 100000000 Hz
(XEN) Domain heap initialised
(XEN) Set hyp vector base to 237ec0 (expected 00237ec0)
(XEN) GIC: 64 lines, 1 cpu, secure (IID 0000043b).
(XEN) GICH: 4 list registers available
(XEN) Using scheduler: SMP Credit Scheduler (credit)
(XEN) Allocated console ring of 16 KiB.
(XEN) Brought up 1 CPUs
(XEN) *** LOADING DOMAIN 0 ***
(XEN) Copying 0x8 bytes from flash 00000000001c8550 to
0029fc48Populate P2M 0x80000000->0x88000000
(XEN) Map CS2 MMIO regions 1:1 in the P2M 0x18000000->0x1bffffff
(XEN) Map CS3 MMIO regions 1:1 in the P2M 0x1c000000->0x1fffffff
(XEN) Map VGIC MMIO regions 1:1 in the P2M 0x2c008000->0x2dffffff
(XEN) mapping GICC at 0x2c002000 to 0x2c006000
(XEN) Routing peripheral interrupts to guest
(XEN) VTTBR dom0 = 10080bdffe000
(XEN) Loading 00000000001c9d5c byte zImage from flash 0000000000000000
to 0000000080008000-00000000801d1d5c: [..]
(XEN) Std. Loglevel: All
(XEN) Guest Loglevel: All
(XEN) *** Serial input -> DOM0 (type 'CTRL-a' three times to switch
input to Xen)
(XEN) Freed 48kB init memory.
(XEN) context switch 32767:0 (idle) -> 0:0
(XEN) VTTBR dom0 = 10080bdffe000
Uncompressing Linux... done, booting the kernel.
(XEN) Guest data abort: Translation fault at level 2
(XEN)     gva=c880cfe0 gpa=000000002b0a0fe0
(XEN)     size=2 sign=0 write=0 reg=3
(XEN)     eat=0 cm=0 s1ptw=0 dfsc=6
(XEN) ----[ Xen-4.2-unstable  x86_64  debug=y  Not tainted ]----
(XEN) CPU:    0
(XEN) PC:     c015c8a8
(XEN) CPSR:   60000013 MODE:SVC
(XEN)      R0: c880cfe0 R1: 00000fe0 R2: 00000000 R3: 00001000
(XEN)      R4: c783cc00 R5: 00001000 R6: c783ccc8 R7: 00000000
(XEN)      R8: c880c000 R9: 00000000 R10:00000000 R11:c0290f7c R12:c0010c80
(XEN) USR: SP: 00000000 LR: 00000000 CPSR:60000013
(XEN) SVC: SP: c781bf18 LR: c015c64c SPSR:00000093
(XEN) ABT: SP: c035cecc LR: c035cecc SPSR:00000000
(XEN) UND: SP: c035ced8 LR: c035ced8 SPSR:00000000
(XEN) IRQ: SP: c035cec0 LR: c0008b20 SPSR:200001d3
(XEN) FIQ: SP: 00000000 LR: 00000000 SPSR:00000000
(XEN) FIQ: R8: 00000000 R9: 00000000 R10:00000000 R11:00000000 R12:00000000
(XEN)
(XEN) TTBR0 80004059 TTBR1 80004059 TTBCR 00000000
(XEN) SCTLR 10c53c7d
(XEN) VTTBR 10080bdffe000
(XEN)
(XEN) HTTBR 80ffe91000
(XEN) HDFAR c880cfe0
(XEN) HIFAR 0
(XEN) HPFAR 2b0a00
(XEN) HCR 00000031
(XEN) HSR   93830006
(XEN)
(XEN) DFSR 0 DFAR 0
(XEN) IFSR 0 IFAR 0
(XEN)
(XEN) GUEST STACK GOES HERE
(XEN)
(XEN) ****************************************
(XEN) Panic on CPU 0:
(XEN) Unhandled guest data abort
(XEN) ****************************************

_______________________________________________
Xen-arm mailing list
Xen-arm@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/mailman/listinfo/xen-arm


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.