[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v3 13/16] xen/arm: Add support for GIC v3



On Tue, 2014-04-15 at 16:47 +0530, vijay.kilari@xxxxxxxxx wrote:

> +#include <xen/config.h>
> +#include <xen/lib.h>
> +#include <xen/init.h>
> +#include <xen/cpu.h>
> +#include <xen/mm.h>
> +#include <xen/irq.h>
> +#include <xen/sched.h>
> +#include <xen/errno.h>
> +#include <xen/serial.h>
> +#include <xen/softirq.h>
> +#include <xen/list.h>
> +#include <xen/device_tree.h>
> +#include <xen/delay.h>
> +#include <asm/p2m.h>
> +#include <asm/domain.h>
> +#include <asm/platform.h>

Do you really need all of these? serial.h for example?

> +
> +#include <asm/gic_v3_defs.h>
> +#include <asm/gic.h>
> +#include <asm/io.h>
> +#include <asm/device.h>
> +
> +static struct gic_hw_operations gic_ops;
> +
> +struct rdist_region {
> +    paddr_t rdist_base;
> +    paddr_t rdist_base_size;
> +    void __iomem *map_rdist_base;

"base", "size" and "map" would be adequate field names I think.

> +};
> +
> +/* Global state */
> +static struct {
> +    paddr_t dbase;            /* Address of distributor registers */
> +    paddr_t dbase_size;
> +    void __iomem *map_dbase;  /* Mapped address of distributor registers */
> +    struct rdist_region *rdist_regions;
> +    u32  rdist_stride;
> +    unsigned int rdist_count; /* Number of rdist regions count */
> +    unsigned int lines;       /* Number of interrupts (SPIs + PPIs + SGIs) */
> +    struct dt_irq maintenance;
> +}gic;
> +
> +/* per-cpu re-distributor base */
> +static DEFINE_PER_CPU(void __iomem*, rbase);

Does this end up containing one of the gic.rdist_regions[i].map entries?
Any reason to duplicate this in that map field as well? Can't each
processor map it as it is initialised and store it here directly?

I suppose we have plenty of ioremap space on v8, so nr_cpus*2*64k isn't
too bad and there's no need to go for per-pcpu pagetables with a
dedicated virtual address region for the redistributors.

> +static u64 gich_read_lr(int lr)
> +{
> +    switch ( lr )
> +    {
> +    case 0: /* ICH_LRn is 64 bit */
> +        return READ_SYSREG(ICH_LR0_EL2);
> +        break;

All of these breaks are redundant. I think you could get away with
        case N: return READ_(..._LRn_EL2);
for brevity even.
> +
> +/* Wait for completion of a distributor change */
> +static void gicv3_do_wait_for_rwp(void __iomem *base)
> +{
> +    u32 val;
> +    u32 count = 1000000;
> +
> +    do {
> +        val = readl_relaxed(base + GICD_CTLR);
> +        if ( !(val & GICD_CTLR_RWP) )
> +           break;
> +        cpu_relax();
> +        udelay(1);

Ick. Is there no event when rwp changes, so we could do wfe here?

Could we at least use NOW() and MILLISECS() to construct a delay of a
known length?

> +    } while ( count-- );
> +
> +    if ( !count )
> +        dprintk(XENLOG_WARNING, "RWP timeout\n");

Shouldn't we panic here?

And if we are going to panic, we might as well wait forever? (Perhaps
with a warning after some number of iterations.)

> +static unsigned int gicv3_mask_cpu(const cpumask_t *cpumask)

This returns an arbitrary cpu from cpumask? Can we name it as such,
please.

The v2 equivalent returns a value suitable for GICD_ITARGETSR, which
might contain multiple valid target CPUs. Does GICD_IROUTER not have the
same property?

> +{
> +    unsigned int cpu;
> +    cpumask_t possible_mask;
> +
> +    cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
> +    cpu = cpumask_any(&possible_mask);
> +    return cpu;
> +}
> +
> +static void write_aprn_regs(union gic_state_data *d)

Given the usage I think this should be restore_aprn_regs.

> +{
> +    ASSERT((nr_priorities > 4 && nr_priorities < 8));
> +    /* Write APRn register based on number of priorities
> +       plaform has implemented */

"platform"

> +    switch ( nr_priorities )
> +    {
> +    case 7:
> +        WRITE_SYSREG32(d->v3.apr0[2], ICH_AP0R2_EL2);
> +        WRITE_SYSREG32(d->v3.apr1[2], ICH_AP1R2_EL2);
> +        /* Fall through */
> +    case 6:
> +        WRITE_SYSREG32(d->v3.apr0[1], ICH_AP0R1_EL2);
> +        WRITE_SYSREG32(d->v3.apr1[1], ICH_AP1R1_EL2);
> +        /* Fall through */
> +    case 5:
> +        WRITE_SYSREG32(d->v3.apr0[0], ICH_AP0R0_EL2);
> +        WRITE_SYSREG32(d->v3.apr1[0], ICH_AP1R0_EL2);
> +        break;
> +    default:

BUG_ON?

> +        break;
> +    }
> +}
> +
> +static void read_aprn_regs(union gic_state_data *d)

The same three comments as write_* apply here too.

> +static void gicv3_save_state(struct vcpu *v)
> +{
> +    int i;
> +
> +    /* No need for spinlocks here because interrupts are disabled around
> +     * this call and it only accesses struct vcpu fields that cannot be
> +     * accessed simultaneously by another pCPU.
> +     */
> +    for ( i = 0; i < nr_lrs; i++)
> +        v->arch.gic.v3.lr[i] = gich_read_lr(i);
> +
> +    read_aprn_regs(&v->arch.gic); 
> +    v->arch.gic.v3.vmcr = READ_SYSREG32(ICH_VMCR_EL2);
> +}
> +
> +static void gicv3_restore_state(struct vcpu *v)
> +{
> +    int i;
> +
> +    for ( i = 0; i < nr_lrs; i++)
> +        gich_write_lr(i, v->arch.gic.v3.lr[i]);

I wonder if the compiler could do a better job of this using the same
switch and fallthrough method you used for aprn regs?

> +
> +    write_aprn_regs(&v->arch.gic);
> +
> +    WRITE_SYSREG32(v->arch.gic.v3.vmcr, ICH_VMCR_EL2);
> +}

> +static void gicv3_enable_irq(struct irq_desc *irqd)
> +{
> +    int irq = irqd->irq;
> +    uint32_t enabler;
> +
> +    /* Enable routing */
> +    if ( irq < NR_GIC_LOCAL_IRQS )
> +    {
> +        enabler = readl_relaxed(GICD_RDIST_SGI_BASE + GICR_ISENABLER0);
> +        enabler |= (1u << irq);
> +        writel_relaxed(enabler, GICD_RDIST_SGI_BASE + GICR_ISENABLER0);

I don't think the enable registers need read-modify-write, do they? You
just write the bit you want to enable, like you do with the clear
ICENABLER registers.

> +static u64 gic_mpidr_to_affinity(u64 mpidr)
> +{
> +    u64 aff;
> +    /* Make sure we don't broadcast the interrupt */
> +     aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |

Indentation here is squiffy.

> +            MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
> +            MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
> +            MPIDR_AFFINITY_LEVEL(mpidr, 0)) & ~GICD_IROUTER_SPI_MODE_ANY;
> +    return aff;
> +}
> +
> +/*
> + * - needs to be called with gic.lock held
> + * - needs to be called with a valid cpu_mask, ie each cpu in the mask has
> + * already called gic_cpu_init
> + */
> +static void gicv3_set_irq_property(unsigned int irq, bool_t level,
> +                                   const cpumask_t *cpu_mask,
> +                                   unsigned int priority)
> +{
> +    uint32_t cfg, edgebit;
> +    u64 affinity;
> +    unsigned int cpu = gicv3_mask_cpu(cpu_mask);
> +
> +
> +    /* Set edge / level */
> +    if ( irq < NR_GIC_SGI )
> +        /* SGI's are always edge-triggered not need to call GICD_ICFGR0 */

s/not/no/ I think.

But then given the comment you do anyway?

> +        cfg = readl_relaxed(GICD_RDIST_SGI_BASE + GICR_ICFGR0);
> +    else if ( irq < NR_GIC_LOCAL_IRQS )
> +        cfg = readl_relaxed(GICD_RDIST_SGI_BASE + GICR_ICFGR1);
> +    else
> +        cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
> +
> +    edgebit = 2u << (2 * (irq % 16));
> +    if ( level )
> +        cfg &= ~edgebit;
> +    else
> +        cfg |= edgebit;
> +
> +    if ( irq < NR_GIC_SGI )
> +       writel_relaxed(cfg, GICD_RDIST_SGI_BASE + GICR_ICFGR0);
> +    else if ( irq < NR_GIC_LOCAL_IRQS )
> +       writel_relaxed(cfg, GICD_RDIST_SGI_BASE + GICR_ICFGR1);
> +    else
> +       writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
> +
> +    affinity = gic_mpidr_to_affinity(cpu_logical_map(cpu));
> +    if ( irq >= NR_GIC_LOCAL_IRQS )
> +        writeq_relaxed(affinity, (GICD + GICD_IROUTER + irq * 8));
> +
> +    /* Set priority */
> +    if ( irq < NR_GIC_LOCAL_IRQS )
> +    {

The {}s here aren't needed.

> +        writeb_relaxed(priority, GICD_RDIST_SGI_BASE + GICR_IPRIORITYR0 + 
> irq);
> +    }
> +    else 
> +    {
> +        writeb_relaxed(priority, GICD + GICD_IPRIORITYR + irq);
> +    }
> +}
> +
> +static void gicv3_enable_redist(void)
> +{
> +    u32 val;
> +    /* Wait for 1s */
> +    u32 count = 1000000;

NOW() + MILLISECS(...) based again?

> +
> +    /* Wake up this CPU redistributor */
> +    val = readl_relaxed(GICD_RDIST_BASE + GICR_WAKER);
> +    val &= ~GICR_WAKER_ProcessorSleep;
> +    writel_relaxed(val, GICD_RDIST_BASE + GICR_WAKER);
> +
> +    do {
> +         val = readl_relaxed(GICD_RDIST_BASE + GICR_WAKER);
> +         if ( !(val & GICR_WAKER_ChildrenAsleep) )
> +           break;
> +         cpu_relax();
> +         udelay(1);
> +    } while ( count-- );
> +
> +    if ( !count )
> +        gdprintk(XENLOG_WARNING, "Redist enable RWP timeout\n");
> +}
> +
> +static int __init gicv3_populate_rdist(void)
> +{
> +    u64 mpidr = cpu_logical_map(smp_processor_id());
> +    u64 typer;
> +    uint32_t aff;

You have an interesting mix of u64 and uint32_t. Please stick to one or
the other, preferably uintXX_t.

> +    int i;
> +    uint32_t reg;
> +
> +    aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
> +           MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
> +           MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
> +           MPIDR_AFFINITY_LEVEL(mpidr, 0));

Is this not gic_mpidr_to_affinity?

> +
> +    for ( i = 0; i < gic.rdist_count; i++ )
> +    {
> +        void __iomem *ptr = gic.rdist_regions[i].map_rdist_base;
> +
> +        reg = readl_relaxed(ptr + GICR_PIDR2);
> +        if ( (reg & GICR_PIDR2_ARCH_MASK) != GICR_PIDR2_GICV3 ) {
> +            dprintk(XENLOG_WARNING, "No redistributor present 
> @%"PRIpaddr"\n",
> +                   (u64)ptr);

What is with this cast? What is wrong with %p?

But in fact I think the virtual address is pretty meaningless. You
should print the associated physical address.

> +            break;
> +        }
> +
> +        do {
> +            typer = readq_relaxed(ptr + GICR_TYPER);
> +            if ( (typer >> 32) == aff )
> +            {
> +                this_cpu(rbase) = ptr;
> +                printk("CPU%d: Found redistributor in region %d\n",
> +                           smp_processor_id(), i);

Print the paddr of the distributor too?

> +                return 0;

So basically we only know the number of redistributors from device tree
and not the mapping from RDIST to CPU, so we have to try every one?

Do you have a reference to the GIC v3 Device Tree bindings? I don't see
them in linux/Documentation/devicetree/bindings/.

> +            }
> +            if ( gic.rdist_stride ) {
> +                ptr += gic.rdist_stride;
> +            } else {
> +                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
> +                if ( typer & GICR_TYPER_VLPIS )
> +                    ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
> +            }
> +        } while ( !(typer & GICR_TYPER_LAST) );
> +    }
> +
> +    dprintk(XENLOG_WARNING, "CPU%d: mpidr %"PRIpaddr" has no 
> re-distributor!\n",
> +                  smp_processor_id(), mpidr);

You are using PRIpaddr to print mpidr here, which is certainly not a
physical address. Please use the appropriate PRI macro and not just one
which happens to accept the correct sized argument.

> +    gicv3_redist_wait_for_rwp();
> +
> +    /* Enable system registers */
> +    gicv3_enable_sre();
> +
> +    WRITE_SYSREG32(0, ICC_BPR1_EL1);

Comment for this ^ write?

> +static void __cpuinit gicv3_hyp_init(void)
> +{
> +    uint32_t vtr;
> +
> +    vtr = READ_SYSREG32(ICH_VTR_EL2);
> +    nr_lrs  = (vtr & GICH_VTR_NRLRGS) + 1;
> +    nr_priorities = ((vtr >> GICH_VTR_PRIBITS_SHIFT) &
> +                    GICH_VTR_PRIBITS_MASK) + 1;
> +
> +    gic_ops.nr_lrs = nr_lrs;
> +
> +    WRITE_SYSREG32(GICH_VMCR_EOI | GICH_VMCR_VENG1, ICH_VMCR_EL2);
> +    WRITE_SYSREG32(GICH_HCR_EN, ICH_HCR_EL2);
> +
> +    update_cpu_lr_mask();
> +    vtr = READ_SYSREG32(ICH_HCR_EL2);

If this apparently redundant read actually serves a purpose (i.e. to
synchronise the h/w or something) then please add a comment.

> +static u16 gicv3_compute_target_list(int *base_cpu, const struct cpumask 
> *mask,
> +                                   u64 cluster_id)
> +{
> +    int cpu = *base_cpu;
> +    u64 mpidr = cpu_logical_map(cpu);
> +    u16 tlist = 0;
> +
> +    while ( cpu < nr_cpu_ids )
> +    {
> +        /*
> +         * If we ever get a cluster of more than 16 CPUs, just
> +         * scream and skip that CPU.
> +         */
> +        if ( (mpidr & 0xff) >= 16 )
> +        {
> +            dprintk(XENLOG_WARNING, "Cluster more than 16's cpus\n");

"Cluster of" or "Cluster with".

This loop runs for every IPI we send. Perhaps we should validate this
sort of thing once at start of day and then assume that it is fine?

> +            goto out;
> +        }
> +        tlist |= 1 << (mpidr & 0xf);
> +
> +        cpu = cpumask_next(cpu, mask);

Aren't you essentially opencoding for_each_cpu with this check and the
while loop? 

The caller of this function already has a for_each_cpu in it. Can you
explain the algorithm please.

> +        if ( cpu == nr_cpu_ids )
> +        {
> +            cpu--;
> +            goto out;
> +        }
> +
> +        mpidr = cpu_logical_map(cpu);
> +        if ( cluster_id != (mpidr & ~0xffUL) ) {
> +            cpu--;
> +            goto out;
> +        }
> +    }
> +out:
> +    *base_cpu = cpu;
> +    return tlist;
> +}
> +
> +static void send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)

s/irq/sgi/ I think.

> +{
> +    u64 val;
> +
> +    val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48        |
> +           MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32        |
> +           irq << 24                                        |
> +           MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16        |

There's been a lot of open coded shifts throughout this code, I'm coming
to the conclusion that they need #defines so this code is more self
documenting (I was puzzled by the irq in the middle of all those
affinities).

> +           tlist);
> +
> +    WRITE_SYSREG(val, ICC_SGI1R_EL1);   
> +}
> +
> +static void gicv3_send_sgi(const cpumask_t *cpumask, enum gic_sgi sgi,
> +            uint8_t mode)
> +{
> +    int cpu = 0;
> +
> +    dsb();

The dsb() macro needs a scope parameter for a while now. Which version
of Xen is all this based on?

> +    for_each_cpu(cpu, cpumask)
> +    {
> +        u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;

Magic number.

> +static void gicv3_update_lr(int lr, struct pending_irq *p, unsigned int 
> state)
> +{
> +    u64 grp = GICH_LR_GRP1;
> +    u64 val = 0;
> +
> +    BUG_ON(lr >= nr_lrs);
> +    BUG_ON(lr < 0);
> +
> +    val =  ((((u64)state) & 0x3) << GICH_LR_STATE_SHIFT) | grp |
> +        ((((u64)p->priority) & 0xff) << GICH_LR_PRIORITY_SHIFT) |
> +        (((u64)p->irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);

Please can you try and do something about the visual clutter here.
Aligning things would be a good start. Perhaps some temporaries and/or
using "val |= XXX" might help too.

Are the & 0x3 and & 0xff really needed here? Aren't state and priority
constrained already?

> +
> +   if ( p->desc != NULL )
> +       val |= GICH_LR_HW |
> +         (((u64) p->desc->irq & GICH_LR_PHYSICAL_MASK) << 
> GICH_LR_PHYSICAL_SHIFT);
> +
> +    gich_write_lr(lr, val);
> +}
> +

> +static void gicv3_read_lr(int lr, struct gic_lr *lr_reg)
> +{
> +    u64 lrv;
> +    lrv = gich_read_lr(lr);
> +    lr_reg->pirq = (lrv >> GICH_LR_PHYSICAL_SHIFT) & GICH_LR_PHYSICAL_MASK;
> +    lr_reg->virq = (lrv >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK; 
> +    lr_reg->priority = (lrv >> GICH_LR_PRIORITY_SHIFT) & 
> GICH_LR_PRIORITY_MASK;
> +    lr_reg->state    = (lrv >> GICH_LR_STATE_SHIFT) & GICH_LR_STATE_MASK;
> +    lr_reg->hw_status = (lrv >> GICH_LR_HW_SHIFT) & GICH_LR_HW_MASK;

Either line things up or don't

> +    lr_reg->grp = (lrv >> GICH_LR_GRP_SHIFT) & GICH_LR_GRP_MASK;
> +}
> +
> +static void gicv3_write_lr(int lr_reg, struct gic_lr *lr)
> +{
> +    u64 lrv = 0;
> +    lrv = ( ((u64)(lr->pirq & GICH_LR_PHYSICAL_MASK) << 
> GICH_LR_PHYSICAL_SHIFT) |
> +        ((u64)(lr->virq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT) |
> +        ((u64)(lr->priority & GICH_LR_PRIORITY_MASK) << 
> GICH_LR_PRIORITY_SHIFT) |
> +        ((u64)(lr->state & GICH_LR_STATE_MASK) << GICH_LR_STATE_SHIFT) |
> +        ((u64)(lr->hw_status & GICH_LR_HW_MASK)  << GICH_LR_HW_SHIFT)  |
> +        ((u64)(lr->grp & GICH_LR_GRP_MASK) << GICH_LR_GRP_SHIFT) );
> +    gich_write_lr(lr_reg, lrv);
> +}
> +
> +int gicv_v3_init(struct domain *d)
> +{
> +    int i;
> +    /*
> +     * Domain 0 gets the hardware address.
> +     * Guests get the virtual platform layout.
> +     */
> +    if ( d->domain_id == 0 )
> +    {
> +        d->arch.vgic.dbase = gic.dbase;
> +        d->arch.vgic.dbase_size = gic.dbase_size;
> +        for ( i = 0; i < gic.rdist_count; i++ )
> +        {
> +            d->arch.vgic.rbase[i] = gic.rdist_regions[i].rdist_base;
> +            d->arch.vgic.rbase_size[i] = 
> gic.rdist_regions[i].rdist_base_size;
> +        }
> +        d->arch.vgic.rdist_stride = gic.rdist_stride;
> +        d->arch.vgic.rdist_count = gic.rdist_count;
> +    }
> +    else
> +    {

Does this need a comment like "Currently guests see a GICv2"? Otherwise
doesn't it need an rbase etc?

> +/* Set up the GIC */
> +static int __init gicv3_init(struct dt_device_node *node,
> +                                       const void *data)
> +{
> +    struct rdist_region *rdist_regs;
> +    int res, i;
> +    uint32_t reg;
> +
> +    dt_device_set_used_by(node, DOMID_XEN);
> +
> +    res = dt_device_get_address(node, 0, &gic.dbase, &gic.dbase_size);
> +    if ( res || !gic.dbase  || (gic.dbase & ~PAGE_MASK) ||
> +       (gic.dbase_size & ~PAGE_MASK) )
> +        panic("GIC: Cannot find a valid address for the distributor");

For the last two clauses printing the invalid values would be a useful
diagnostic (probably split into two ifs and two panics)

> +    gic.map_dbase = ioremap_nocache(gic.dbase, gic.dbase_size);
> +    if ( !gic.map_dbase )
> +        panic("Failed to ioremap for GIC distributor\n");

Please consistently prefix these "GIC:" (or better yet, GICv3)

> +
> +    reg = readl_relaxed(GICD + GICD_PIDR2);
> +    if ( (reg & GICD_PIDR2_ARCH_MASK) != GICD_PIDR2_GICV3 )
> +        panic("GIC: no distributor detected, giving up\n"); 

", giving up" is redundant in the context of a panic.

> +
> +    gic_ops.hw_version = GIC_V3;
> + 
> +    if ( !dt_property_read_u32(node, "#redistributor-regions",
> +                &gic.rdist_count) )
> +        gic.rdist_count = 1;
> +
> +    if ( gic.rdist_count > MAX_RDIST_COUNT )
> +        panic("GIC: Number of redistributor regions is more than \
> +               MAX_RDIST_COUNT (Increase MAX_RDIST_COUNT!!)\n");

Drop the parenthetical and print the actual and maximum values please.

> +
> +    rdist_regs = xzalloc_array(struct rdist_region, gic.rdist_count);
> +    if ( !rdist_regs )
> +        panic("GIC: no distributor detected, giving up\n");

This panic message does not match the context.

> +
> +    for ( i = 0; i < gic.rdist_count; i++ ) {
> +        u64 rdist_base, rdist_size;
> +
> +        res = dt_device_get_address(node, 1 + i, &rdist_base, &rdist_size);
> +        if ( res || !rdist_base)
> +            panic("No rdist base found\n");

Please print which one.

> +        rdist_regs[i].rdist_base = rdist_base;
> +        rdist_regs[i].rdist_base_size = rdist_size;
> +    }
> +
> +    if ( !dt_property_read_u32(node, "redistributor-stride", 
> &gic.rdist_stride) )
> +        gic.rdist_stride = 0x0;

Somewhere else you had code to handle this with an if/else and explicit
increment. Why not just set this appropriately to start with and avoid
all that?

> +    for ( i = 0; i < gic.rdist_count; i++ ) {
> +        /* map dbase & rdist regions */
> +        gic.rdist_regions[i].map_rdist_base =
> +                ioremap_nocache(gic.rdist_regions[i].rdist_base,
> +                          gic.rdist_regions[i].rdist_base_size);
> +
> +        if ( !gic.rdist_regions[i].map_rdist_base )
> +            panic("Failed to ioremap for GIC redistributor\n");

Please print which one.

> +    }
> +
> +    printk("GIC initialization:\n"
> +              "        gic_dist_addr=%"PRIpaddr"\n"
> +              "        gic_dist_size=%"PRIpaddr"\n"
> +              "        gic_dist_mapaddr=%"PRIpaddr"\n"
> +              "        gic_rdist_regions=%d\n"
> +              "        gic_rdist_stride=%x\n"
> +              "        gic_rdist_base=%"PRIpaddr"\n"
> +              "        gic_rdist_base_size=%"PRIpaddr"\n"
> +              "        gic_rdist_base_mapaddr=%"PRIpaddr"\n"
> +              "        gic_maintenance_irq=%u\n",
> +              gic.dbase, gic.dbase_size, (u64)gic.map_dbase, gic.rdist_count,
> +              gic.rdist_stride, gic.rdist_regions[0].rdist_base,
> +              gic.rdist_regions[0].rdist_base_size,
> +              (u64)gic.rdist_regions[0].map_rdist_base, gic.maintenance.irq);

Print the stride perhaps?

Please removal all of the casts from this code and use the correct
format specifier instead.

As I've said before, a cast should almost never be needed.

> diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
> index 7489684..3c37120 100644
> --- a/xen/include/asm-arm/gic.h
> +++ b/xen/include/asm-arm/gic.h
> @@ -18,6 +18,12 @@
>  #ifndef __ASM_ARM_GIC_H__
>  #define __ASM_ARM_GIC_H__
>  
> +#define SZ_64K  0x00010000

This isn't gic, or even ARM, specific. Please find a proper home for it.

> +#define NR_GIC_LOCAL_IRQS  NR_LOCAL_IRQS
> +#define NR_GIC_SGI         16
> +#define MAX_RDIST_COUNT    4

These should refer to GIC v3 in their names I think.

> +
>  /*
>   * The minimum GICC_BPR is required to be in the range 0-3. We set
>   * GICC_BPR to 0 but we must expect that it might be 3. This means we
> @@ -62,6 +68,13 @@
>  #define DT_MATCH_GIC    DT_MATCH_COMPATIBLE("arm,cortex-a15-gic"), \
>                          DT_MATCH_COMPATIBLE("arm,cortex-a7-gic")

You should have one of these for GICv3 and use it.

Ian.



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.