Re: [Xen-devel] [RFC PATCH v1 06/10] xen/arm: split gic driver into generic and gicv2 driver
On Thu, Mar 20, 2014 at 5:25 PM, Stefano Stabellini
<stefano.stabellini@xxxxxxxxxxxxx> wrote:
> On Wed, 19 Mar 2014, vijay.kilari@xxxxxxxxx wrote:
>> From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
>>
>> The existing GIC driver mixes generic code with hardware-specific
>> code. Move the low-level GIC code into gic-v2.c and keep the
>> generic code in the existing gic.c file.
>>
>> The GIC v2 driver registers the required functions with the
>> generic GIC driver. This makes it possible to plug in later
>> GIC drivers such as GIC v3.
>>
>> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
>
> Am I correct in assuming that this patch is just code movement plus the
> introduction of struct gic_hw_operations?
>
Yes. As part of the code movement, I introduced this structure and
used these callbacks in the generic code.
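
To make the split concrete, the pattern is simply "the low-level driver
registers a struct of function pointers, the generic code calls through
it". A minimal standalone sketch (simplified names and types, not the
exact Xen interfaces from this patch):

    #include <stdio.h>

    /* Illustrative stand-in for struct gic_hw_operations. */
    struct hw_ops {
        unsigned int (*nr_lines)(void);
        void (*enable_irq)(int irq);
    };

    /* The generic layer only keeps a pointer to whatever driver registered. */
    static struct hw_ops *ops;

    static void register_ops(struct hw_ops *hw)
    {
        ops = hw;
    }

    /* Hardware-specific (GICv2-like) implementations. */
    static unsigned int v2_nr_lines(void) { return 96; }
    static void v2_enable_irq(int irq) { printf("enable irq %d\n", irq); }

    static struct hw_ops v2_ops = {
        .nr_lines   = v2_nr_lines,
        .enable_irq = v2_enable_irq,
    };

    int main(void)
    {
        register_ops(&v2_ops);   /* what gicv2_init() does via register_gic_ops() */

        /* The generic code (gic.c) then dispatches through the callbacks. */
        if (ops->nr_lines() > 0)
            ops->enable_irq(0);
        return 0;
    }

A later driver (e.g. GIC v3) only has to provide its own struct of
callbacks and register it; the generic gic.c code is untouched.
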
>
>> xen/arch/arm/Makefile | 2 +-
>> xen/arch/arm/gic-v2.c | 562 +++++++++++++++++++++++++++++++++++++
>> xen/arch/arm/gic.c | 530 +++++++---------------------------
>> xen/include/asm-arm/gic.h | 40 ++-
>> xen/include/asm-arm/gic_v2_defs.h | 2 -
>> 5 files changed, 708 insertions(+), 428 deletions(-)
>>
>> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
>> index 63e0460..969ee52 100644
>> --- a/xen/arch/arm/Makefile
>> +++ b/xen/arch/arm/Makefile
>> @@ -10,7 +10,7 @@ obj-y += vpsci.o
>> obj-y += domctl.o
>> obj-y += sysctl.o
>> obj-y += domain_build.o
>> -obj-y += gic.o
>> +obj-y += gic.o gic-v2.o
>> obj-y += io.o
>> obj-y += irq.o
>> obj-y += kernel.o
>> diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
>> new file mode 100644
>> index 0000000..9378427
>> --- /dev/null
>> +++ b/xen/arch/arm/gic-v2.c
>> @@ -0,0 +1,562 @@
>> +/*
>> + * xen/arch/arm/gic-v2.c
>> + *
>> + * ARM Generic Interrupt Controller support v2
>> + *
>> + * Tim Deegan <tim@xxxxxxx>
>> + * Copyright (c) 2011 Citrix Systems.
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + */
>> +
>> +#include <xen/config.h>
>> +#include <xen/lib.h>
>> +#include <xen/init.h>
>> +#include <xen/cpu.h>
>> +#include <xen/mm.h>
>> +#include <xen/irq.h>
>> +#include <xen/sched.h>
>> +#include <xen/errno.h>
>> +#include <xen/serial.h>
>> +#include <xen/softirq.h>
>> +#include <xen/list.h>
>> +#include <xen/device_tree.h>
>> +#include <asm/p2m.h>
>> +#include <asm/domain.h>
>> +#include <asm/platform.h>
>> +
>> +#include <asm/gic_v2_defs.h>
>> +#include <asm/gic.h>
>> +
>> +/* Access to the GIC Distributor registers through the fixmap */
>> +#define GICD ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICD))
>> +#define GICC ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICC1))
>> +#define GICH ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICH))
>> +
>> +struct gic_state_data {
>> + uint32_t gic_hcr;
>> + uint32_t gic_vmcr;
>> + uint32_t gic_apr;
>> + uint32_t gic_lr[64];
>> +};
>> +
>> +/* Global state */
>> +static struct {
>> + int hw_version;
>> + paddr_t dbase; /* Address of distributor registers */
>> + paddr_t cbase; /* Address of CPU interface registers */
>> + paddr_t hbase; /* Address of virtual interface registers */
>> + paddr_t vbase; /* Address of virtual cpu interface registers */
>> + unsigned int lines; /* Number of interrupts (SPIs + PPIs + SGIs) */
>> + struct dt_irq maintenance; /* IRQ maintenance */
>> + unsigned int cpus;
>> + spinlock_t lock;
>> +} gic;
>> +
>> +static unsigned nr_lrs;
>> +
>> +/* The GIC mapping of CPU interfaces does not necessarily match the
>> + * logical CPU numbering. Let's use mapping as returned by the GIC
>> + * itself
>> + */
>> +static DEFINE_PER_CPU(u8, gic_cpu_id);
>> +
>> +/* Maximum cpu interface per GIC */
>> +#define NR_GIC_CPU_IF 8
>> +
>> +static unsigned int gic_cpu_mask(const cpumask_t *cpumask)
>> +{
>> + unsigned int cpu;
>> + unsigned int mask = 0;
>> + cpumask_t possible_mask;
>> +
>> + cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
>> + for_each_cpu(cpu, &possible_mask)
>> + {
>> + ASSERT(cpu < NR_GIC_CPU_IF);
>> + mask |= per_cpu(gic_cpu_id, cpu);
>> + }
>> +
>> + return mask;
>> +}
>> +
>> +static unsigned int gic_nr_lines(void)
>> +{
>> + return gic.lines;
>> +}
>> +
>> +static unsigned int gic_nr_lrs(void)
>> +{
>> + return nr_lrs;
>> +}
>> +
>> +static int gic_state_init(struct vcpu *v)
>> +{
>> + v->arch.gic_state = (struct gic_state_data *)xzalloc(struct gic_state_data);
>> + if(!v->arch.gic_state)
>> + return -ENOMEM;
>> + return 0;
>> +}
>> +
>> +static void save_state(struct vcpu *v)
>> +{
>> + int i;
>> + struct gic_state_data *d;
>> + d = (struct gic_state_data *)v->arch.gic_state;
>> +
>> + ASSERT(!local_irq_is_enabled());
>> +
>> + /* No need for spinlocks here because interrupts are disabled around
>> + * this call and it only accesses struct vcpu fields that cannot be
>> + * accessed simultaneously by another pCPU.
>> + */
>> + for ( i = 0; i < nr_lrs; i++)
>> + d->gic_lr[i] = GICH[GICH_LR + i];
>> + d->gic_apr = GICH[GICH_APR];
>> + d->gic_vmcr = GICH[GICH_VMCR];
>> + /* Disable until next VCPU scheduled */
>> + GICH[GICH_HCR] = 0;
>> +}
>> +
>> +static void restore_state(struct vcpu *v)
>> +{
>> + int i;
>> + struct gic_state_data *d;
>> + d = (struct gic_state_data *)v->arch.gic_state;
>> +
>> + for ( i = 0; i < nr_lrs; i++)
>> + GICH[GICH_LR + i] = d->gic_lr[i];
>> + GICH[GICH_APR] = d->gic_apr;
>> + GICH[GICH_VMCR] = d->gic_vmcr;
>> + GICH[GICH_HCR] = GICH_HCR_EN;
>> +}
>> +
>> +static void gic_dump_state(struct vcpu *v)
>> +{
>> + int i;
>> + struct gic_state_data *d;
>> + d = (struct gic_state_data *)v->arch.gic_state;
>> +
>> + if ( v == current )
>> + {
>> + for ( i = 0; i < nr_lrs; i++ )
>> + printk(" HW_LR[%d]=%x\n", i, GICH[GICH_LR + i]);
>> + } else {
>> + for ( i = 0; i < nr_lrs; i++ )
>> + printk(" VCPU_LR[%d]=%x\n", i, d->gic_lr[i]);
>> + }
>> +}
>> +
>> +static void gic_enable_irq(int irq)
>> +{
>> + /* Enable routing */
>> + GICD[GICD_ISENABLER + irq / 32] = (1u << (irq % 32));
>> +}
>> +
>> +static void gic_disable_irq(int irq)
>> +{
>> + /* Disable routing */
>> + GICD[GICD_ICENABLER + irq / 32] = (1u << (irq % 32));
>> +}
>> +
>> +static void gic_eoi_irq(int irq)
>> +{
>> + /* Lower the priority */
>> + GICC[GICC_EOIR] = irq;
>> +}
>> +
>> +static void gic_dir_irq(int irq)
>> +{
>> + /* Deactivate */
>> + GICC[GICC_DIR] = irq;
>> +}
>> +
>> +static unsigned int gic_ack_irq(void)
>> +{
>> + return (GICC[GICC_IAR] & GICC_IA_IRQ);
>> +}
>> +
>> +/*
>> + * - needs to be called with gic.lock held
>> + * - needs to be called with a valid cpu_mask, ie each cpu in the mask has
>> + * already called gic_cpu_init
>> + */
>> +static void gic_set_irq_properties(unsigned int irq, bool_t level,
>> + const cpumask_t *cpu_mask,
>> + unsigned int priority)
>> +{
>> + volatile unsigned char *bytereg;
>> + uint32_t cfg, edgebit;
>> + unsigned int mask = gic_cpu_mask(cpu_mask);
>> +
>> + /* Set edge / level */
>> + cfg = GICD[GICD_ICFGR + irq / 16];
>> + edgebit = 2u << (2 * (irq % 16));
>> + if ( level )
>> + cfg &= ~edgebit;
>> + else
>> + cfg |= edgebit;
>> + GICD[GICD_ICFGR + irq / 16] = cfg;
>> +
>> + /* Set target CPU mask (RAZ/WI on uniprocessor) */
>> + bytereg = (unsigned char *) (GICD + GICD_ITARGETSR);
>> + bytereg[irq] = mask;
>> +
>> + /* Set priority */
>> + bytereg = (unsigned char *) (GICD + GICD_IPRIORITYR);
>> + bytereg[irq] = priority;
>> +
>> +}
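
As a quick sanity check of the index arithmetic above: GICD_ICFGR packs
two configuration bits per interrupt (16 interrupts per 32-bit word),
while GICD_ITARGETSR and GICD_IPRIORITYR use one byte per interrupt.
A small illustration of the same computation (example values only):

    #include <stdio.h>

    int main(void)
    {
        unsigned int irq = 34;                          /* example SPI */
        unsigned int icfgr_word = irq / 16;             /* 32-bit word index into GICD_ICFGR */
        unsigned int edgebit = 2u << (2 * (irq % 16));  /* "edge" bit of the 2-bit field */
        unsigned int byte_offset = irq;                 /* byte index into ITARGETSR/IPRIORITYR */

        /* Prints: GICD_ICFGR word 2, edge bit 0x20, byte offset 34 */
        printf("GICD_ICFGR word %u, edge bit 0x%x, byte offset %u\n",
               icfgr_word, edgebit, byte_offset);
        return 0;
    }
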
>> +
>> +static void __init gic_dist_init(void)
>> +{
>> + uint32_t type;
>> + uint32_t cpumask;
>> + int i;
>> +
>> + cpumask = GICD[GICD_ITARGETSR] & 0xff;
>> + cpumask |= cpumask << 8;
>> + cpumask |= cpumask << 16;
>> +
>> + /* Disable the distributor */
>> + GICD[GICD_CTLR] = 0;
>> +
>> + type = GICD[GICD_TYPER];
>> + gic.lines = 32 * ((type & GICD_TYPE_LINES) + 1);
>> + gic.cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
>> + printk("GIC: %d lines, %d cpu%s%s (IID %8.8x).\n",
>> + gic.lines, gic.cpus, (gic.cpus == 1) ? "" : "s",
>> + (type & GICD_TYPE_SEC) ? ", secure" : "",
>> + GICD[GICD_IIDR]);
>> +
>> + /* Default all global IRQs to level, active low */
>> + for ( i = 32; i < gic.lines; i += 16 )
>> + GICD[GICD_ICFGR + i / 16] = 0x0;
>> +
>> + /* Route all global IRQs to this CPU */
>> + for ( i = 32; i < gic.lines; i += 4 )
>> + GICD[GICD_ITARGETSR + i / 4] = cpumask;
>> +
>> + /* Default priority for global interrupts */
>> + for ( i = 32; i < gic.lines; i += 4 )
>> + GICD[GICD_IPRIORITYR + i / 4] =
>> + GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 | GIC_PRI_IRQ;
>> +
>> + /* Disable all global interrupts */
>> + for ( i = 32; i < gic.lines; i += 32 )
>> + GICD[GICD_ICENABLER + i / 32] = (uint32_t)~0ul;
>> +
>> + /* Turn on the distributor */
>> + GICD[GICD_CTLR] = GICD_CTL_ENABLE;
>> +}
>> +
>> +static void __cpuinit gic_cpu_init(void)
>> +{
>> + int i;
>> +
>> + this_cpu(gic_cpu_id) = GICD[GICD_ITARGETSR] & 0xff;
>> +
>> + /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
>> + * even though they are controlled with GICD registers, they must
>> + * be set up here with the other per-cpu state. */
>> + GICD[GICD_ICENABLER] = 0xffff0000; /* Disable all PPI */
>> + GICD[GICD_ISENABLER] = 0x0000ffff; /* Enable all SGI */
>> + /* Set SGI priorities */
>> + for (i = 0; i < 16; i += 4)
>> + GICD[GICD_IPRIORITYR + i / 4] =
>> + GIC_PRI_IPI<<24 | GIC_PRI_IPI<<16 | GIC_PRI_IPI<<8 | GIC_PRI_IPI;
>> + /* Set PPI priorities */
>> + for (i = 16; i < 32; i += 4)
>> + GICD[GICD_IPRIORITYR + i / 4] =
>> + GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 | GIC_PRI_IRQ;
>> +
>> + /* Local settings: interface controller */
>> + GICC[GICC_PMR] = 0xff; /* Don't mask by priority */
>> + GICC[GICC_BPR] = 0; /* Finest granularity of priority */
>> + GICC[GICC_CTLR] = GICC_CTL_ENABLE|GICC_CTL_EOI; /* Turn on delivery */
>> +}
>> +
>> +static void gic_cpu_disable(void)
>> +{
>> + GICC[GICC_CTLR] = 0;
>> +}
>> +
>> +static void __cpuinit gic_hyp_init(void)
>> +{
>> + uint32_t vtr;
>> +
>> + vtr = GICH[GICH_VTR];
>> + nr_lrs = (vtr & GICH_VTR_NRLRGS) + 1;
>> +
>> + GICH[GICH_MISR] = GICH_MISR_EOI;
>> + update_cpu_lr_mask();
>> +}
>> +
>> +static void __cpuinit gic_hyp_disable(void)
>> +{
>> + GICH[GICH_HCR] = 0;
>> +}
>> +
>> +/* Set up the per-CPU parts of the GIC for a secondary CPU */
>> +static int __cpuinit gic_init_secondary_cpu(struct notifier_block *nfb,
>> + unsigned long action, void *hcpu)
>> +{
>> + if (action == CPU_STARTING)
>> + {
>> + spin_lock(&gic.lock);
>> + gic_cpu_init();
>> + gic_hyp_init();
>> + spin_unlock(&gic.lock);
>> + }
>> + return NOTIFY_DONE;
>> +}
>> +
>> +static struct notifier_block gic_cpu_nb = {
>> + .notifier_call = gic_init_secondary_cpu,
>> + .priority = 100
>> +};
>> +
>> +static void gic_smp_init(void)
>> +{
>> + register_cpu_notifier(&gic_cpu_nb);
>> +}
>> +
>> +static int gic_hw_type(void)
>> +{
>> + return gic.hw_version;
>> +}
>> +
>> +static struct dt_irq * gic_maintenance_irq(void)
>> +{
>> + return &gic.maintenance;
>> +}
>> +
>> +/* Set up the GIC */
>> +
>> +static void gic_send_sgi(const cpumask_t *cpumask, enum gic_sgi sgi)
>> +{
>> + unsigned int mask = 0;
>> + cpumask_t online_mask;
>> +
>> + ASSERT(sgi < 16); /* There are only 16 SGIs */
>> +
>> + cpumask_and(&online_mask, cpumask, &cpu_online_map);
>> + mask = gic_cpu_mask(&online_mask);
>> +
>> + dsb();
>> +
>> + GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST
>> + | (mask<<GICD_SGI_TARGET_SHIFT)
>> + | sgi;
>> +}
>> +
>> +/* Shut down the per-CPU GIC interface */
>> +static void gic_disable_interface(void)
>> +{
>> + ASSERT(!local_irq_is_enabled());
>> +
>> + spin_lock(&gic.lock);
>> + gic_cpu_disable();
>> + gic_hyp_disable();
>> + spin_unlock(&gic.lock);
>> +}
>> +
>> +static void gic_update_lr(int lr, unsigned int virtual_irq,
>> + unsigned int state, unsigned int priority)
>> +{
>> + int maintenance_int = GICH_LR_MAINTENANCE_IRQ;
>> +
>> + BUG_ON(lr >= nr_lrs);
>> + BUG_ON(lr < 0);
>> + BUG_ON(state & ~(GICH_LR_STATE_MASK<<GICH_LR_STATE_SHIFT));
>> +
>> + GICH[GICH_LR + lr] = ((state & 0x3) << GICH_LR_STATE_SHIFT) |
>> + maintenance_int |
>> + ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
>> + ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
>> +
>> +}
>> +
>> +static int gicv_init(struct domain *d)
>> +{
>> + int ret;
>> +
>> + /*
>> + * Domain 0 gets the hardware address.
>> + * Guests get the virtual platform layout.
>> + */
>> + if ( d->domain_id == 0 )
>> + {
>> + d->arch.vgic.dbase = gic.dbase;
>> + d->arch.vgic.cbase = gic.cbase;
>> + }
>> + else
>> + {
>> + d->arch.vgic.dbase = GUEST_GICD_BASE;
>> + d->arch.vgic.cbase = GUEST_GICC_BASE;
>> + }
>> +
>> + d->arch.vgic.nr_lines = 0;
>> +
>> + /*
>> + * Map the gic virtual cpu interface in the gic cpu interface
>> + * region of the guest.
>> + *
>> + * The second page is always mapped at +4K irrespective of the
>> + * GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
>> + */
>> + ret = map_mmio_regions(d, d->arch.vgic.cbase,
>> + d->arch.vgic.cbase + PAGE_SIZE - 1,
>> + gic.vbase);
>> + if (ret)
>> + return ret;
>> +
>> + if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
>> + ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
>> + d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
>> + gic.vbase + PAGE_SIZE);
>> + else
>> + ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
>> + d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
>> + gic.vbase + 16*PAGE_SIZE);
>> +
>> + return ret;
>> +
>> +}
>> +
>> +static unsigned long gic_read_eisr(void)
>> +{
>> + return ((unsigned long)(GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32)));
>> +}
>> +
>> +static unsigned int gic_update_lr_for_mi(int lr)
>> +{
>> + u64 val;
>> + uint32_t virq;
>> +
>> + spin_lock_irq(&gic.lock);
>> + val = GICH[GICH_LR + lr];
>> + virq = val & GICH_LR_VIRTUAL_MASK;
>> + GICH[GICH_LR + lr] = 0;
>> + spin_unlock_irq(&gic.lock);
>> + return virq;
>> +}
>> +
>> +static struct gic_hw_operations gic_ops = {
>> + .gic_type = gic_hw_type,
>> + .nr_lines = gic_nr_lines,
>> + .nr_lrs = gic_nr_lrs,
>> + .get_maintenance_irq = gic_maintenance_irq,
>> + .state_init = gic_state_init,
>> + .save_state = save_state,
>> + .restore_state = restore_state,
>> + .dump_state = gic_dump_state,
>> + .gicv_setup = gicv_init,
>> + .enable_irq = gic_enable_irq,
>> + .disable_irq = gic_disable_irq,
>> + .eoi_irq = gic_eoi_irq,
>> + .deactivate_irq = gic_dir_irq,
>> + .ack_irq = gic_ack_irq,
>> + .set_irq_property = gic_set_irq_properties,
>> + .send_sgi = gic_send_sgi,
>> + .disable_interface = gic_disable_interface,
>> + .update_lr = gic_update_lr,
>> + .update_lr_for_mi = gic_update_lr_for_mi,
>> + .read_eisr = gic_read_eisr,
>> +};
>> +
>> +void __init gicv2_init(void)
>> +{
>> + static const struct dt_device_match gic_ids[] __initconst =
>> + {
>> + DT_MATCH_GIC,
>> + { /* sentinel */ },
>> + };
>> + struct dt_device_node *node;
>> + int res;
>> +
>> + node = dt_find_interrupt_controller(gic_ids);
>> + if ( !node )
>> + panic("Unable to find compatible GIC in the device tree");
>> +
>> + dt_device_set_used_by(node, DOMID_XEN);
>> +
>> + gic.hw_version = GIC_VERSION_V2;
>> +
>> + res = dt_device_get_address(node, 0, &gic.dbase, NULL);
>> + if ( res || !gic.dbase || (gic.dbase & ~PAGE_MASK) )
>> + panic("GIC: Cannot find a valid address for the distributor");
>> +
>> + res = dt_device_get_address(node, 1, &gic.cbase, NULL);
>> + if ( res || !gic.cbase || (gic.cbase & ~PAGE_MASK) )
>> + panic("GIC: Cannot find a valid address for the CPU");
>> +
>> + res = dt_device_get_address(node, 2, &gic.hbase, NULL);
>> + if ( res || !gic.hbase || (gic.hbase & ~PAGE_MASK) )
>> + panic("GIC: Cannot find a valid address for the hypervisor");
>> +
>> + res = dt_device_get_address(node, 3, &gic.vbase, NULL);
>> + if ( res || !gic.vbase || (gic.vbase & ~PAGE_MASK) )
>> + panic("GIC: Cannot find a valid address for the virtual CPU");
>> +
>> + res = dt_device_get_irq(node, 0, &gic.maintenance);
>> + if ( res )
>> + panic("GIC: Cannot find the maintenance IRQ");
>> +
>> + /* Set the GIC as the primary interrupt controller */
>> + dt_interrupt_controller = node;
>> +
>> + /* TODO: Add check on distributor, cpu size */
>> +
>> + printk("GIC initialization:\n"
>> + " gic_dist_addr=%"PRIpaddr"\n"
>> + " gic_cpu_addr=%"PRIpaddr"\n"
>> + " gic_hyp_addr=%"PRIpaddr"\n"
>> + " gic_vcpu_addr=%"PRIpaddr"\n"
>> + " gic_maintenance_irq=%u\n",
>> + gic.dbase, gic.cbase, gic.hbase, gic.vbase,
>> + gic.maintenance.irq);
>> +
>> + if ( (gic.dbase & ~PAGE_MASK) || (gic.cbase & ~PAGE_MASK) ||
>> + (gic.hbase & ~PAGE_MASK) || (gic.vbase & ~PAGE_MASK) )
>> + panic("GIC interfaces not page aligned");
>> +
>> + set_fixmap(FIXMAP_GICD, gic.dbase >> PAGE_SHIFT, DEV_SHARED);
>> + BUILD_BUG_ON(FIXMAP_ADDR(FIXMAP_GICC1) !=
>> + FIXMAP_ADDR(FIXMAP_GICC2)-PAGE_SIZE);
>> + set_fixmap(FIXMAP_GICC1, gic.cbase >> PAGE_SHIFT, DEV_SHARED);
>> + if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
>> + set_fixmap(FIXMAP_GICC2, (gic.cbase >> PAGE_SHIFT) + 0x10, DEV_SHARED);
>> + else
>> + set_fixmap(FIXMAP_GICC2, (gic.cbase >> PAGE_SHIFT) + 0x1, DEV_SHARED);
>> + set_fixmap(FIXMAP_GICH, gic.hbase >> PAGE_SHIFT, DEV_SHARED);
>> +
>> + /* Global settings: interrupt distributor */
>> + spin_lock_init(&gic.lock);
>> + spin_lock(&gic.lock);
>> +
>> + gic_smp_init();
>> + gic_dist_init();
>> + gic_cpu_init();
>> + gic_hyp_init();
>> +
>> + register_gic_ops(&gic_ops);
>> + spin_unlock(&gic.lock);
>> +}
>> +
>> +/*
>> + * Local variables:
>> + * mode: C
>> + * c-file-style: "BSD"
>> + * c-basic-offset: 4
>> + * indent-tabs-mode: nil
>> + * End:
>> + */
>> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
>> index bb718f6..e0859ae 100644
>> --- a/xen/arch/arm/gic.c
>> +++ b/xen/arch/arm/gic.c
>> @@ -33,68 +33,46 @@
>> #include <asm/domain.h>
>> #include <asm/platform.h>
>>
>> -#include <asm/gic_v2_defs.h>
>> #include <asm/gic.h>
>>
>> -/* Access to the GIC Distributor registers through the fixmap */
>> -#define GICD ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICD))
>> -#define GICC ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICC1))
>> -#define GICH ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICH))
>> static void gic_restore_pending_irqs(struct vcpu *v);
>>
>> -struct gic_state_data {
>> - uint32_t gic_hcr;
>> - uint32_t gic_vmcr;
>> - uint32_t gic_apr;
>> - uint32_t gic_lr[64];
>> -};
>> -
>> -/* Global state */
>> -static struct {
>> - paddr_t dbase; /* Address of distributor registers */
>> - paddr_t cbase; /* Address of CPU interface registers */
>> - paddr_t hbase; /* Address of virtual interface registers */
>> - paddr_t vbase; /* Address of virtual cpu interface registers */
>> - unsigned int lines; /* Number of interrupts (SPIs + PPIs + SGIs) */
>> - struct dt_irq maintenance; /* IRQ maintenance */
>> - unsigned int cpus;
>> - spinlock_t lock;
>> -} gic;
>> -
>> static irq_desc_t irq_desc[NR_IRQS];
>> static DEFINE_PER_CPU(irq_desc_t[NR_LOCAL_IRQS], local_irq_desc);
>> static DEFINE_PER_CPU(uint64_t, lr_mask);
>>
>> -static unsigned nr_lrs;
>> -
>> -/* The GIC mapping of CPU interfaces does not necessarily match the
>> - * logical CPU numbering. Let's use mapping as returned by the GIC
>> - * itself
>> - */
>> -static DEFINE_PER_CPU(u8, gic_cpu_id);
>> +static struct gic_hw_operations *gic_hw_ops;
>>
>> -/* Maximum cpu interface per GIC */
>> -#define NR_GIC_CPU_IF 8
>> +spinlock_t gic_lock;
>>
>> -static unsigned int gic_cpu_mask(const cpumask_t *cpumask)
>> +void register_gic_ops(struct gic_hw_operations *ops)
>> {
>> - unsigned int cpu;
>> - unsigned int mask = 0;
>> - cpumask_t possible_mask;
>> + gic_hw_ops = ops;
>> +}
>>
>> - cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
>> - for_each_cpu(cpu, &possible_mask)
>> - {
>> - ASSERT(cpu < NR_GIC_CPU_IF);
>> - mask |= per_cpu(gic_cpu_id, cpu);
>> - }
>> +void update_cpu_lr_mask(void)
>> +{
>> + this_cpu(lr_mask) = 0ULL;
>> +}
>>
>> - return mask;
>> +int gic_hw_version(void)
>> +{
>> + return gic_hw_ops->gic_type();
>> }
>>
>> unsigned int gic_number_lines(void)
>> {
>> - return gic.lines;
>> + return gic_hw_ops->nr_lines();
>> +}
>> +
>> +unsigned long gic_data_rdist_rd_base(void)
>> +{
>> + return gic_hw_ops->read_cpu_rbase();
>> +}
>> +
>> +unsigned long gic_data_rdist_sgi_base(void)
>> +{
>> + return gic_hw_ops->read_cpu_sgi_rbase();
>> }
>>
>> irq_desc_t *__irq_to_desc(int irq)
>> @@ -105,43 +83,21 @@ irq_desc_t *__irq_to_desc(int irq)
>>
>> void gic_save_state(struct vcpu *v)
>> {
>> - int i;
>> - struct gic_state_data *d;
>> - d = (struct gic_state_data *)v->arch.gic_state;
>> -
>> ASSERT(!local_irq_is_enabled());
>>
>> - /* No need for spinlocks here because interrupts are disabled around
>> - * this call and it only accesses struct vcpu fields that cannot be
>> - * accessed simultaneously by another pCPU.
>> - */
>> - for ( i=0; i<nr_lrs; i++)
>> - d->gic_lr[i] = GICH[GICH_LR + i];
>> v->arch.lr_mask = this_cpu(lr_mask);
>> - d->gic_apr = GICH[GICH_APR];
>> - d->gic_vmcr = GICH[GICH_VMCR];
>> - /* Disable until next VCPU scheduled */
>> - GICH[GICH_HCR] = 0;
>> + gic_hw_ops->save_state(v);
>> isb();
>> }
>>
>> void gic_restore_state(struct vcpu *v)
>> {
>> - int i;
>> - struct gic_state_data *d;
>> - d = (struct gic_state_data *)v->arch.gic_state;
>> -
>> if ( is_idle_vcpu(v) )
>> return;
>>
>> this_cpu(lr_mask) = v->arch.lr_mask;
>> - for ( i=0; i<nr_lrs; i++)
>> - GICH[GICH_LR + i] = d->gic_lr[i];
>> - GICH[GICH_APR] = d->gic_apr;
>> - GICH[GICH_VMCR] = d->gic_vmcr;
>> - GICH[GICH_HCR] = GICH_HCR_EN;
>> + gic_hw_ops->restore_state(v);
>> isb();
>> -
>> gic_restore_pending_irqs(v);
>> }
>>
>> @@ -151,12 +107,12 @@ static void gic_irq_enable(struct irq_desc *desc)
>> unsigned long flags;
>>
>> spin_lock_irqsave(&desc->lock, flags);
>> - spin_lock(&gic.lock);
>> + spin_lock(&gic_lock);
>> desc->status &= ~IRQ_DISABLED;
>> dsb();
>> /* Enable routing */
>> - GICD[GICD_ISENABLER + irq / 32] = (1u << (irq % 32));
>> - spin_unlock(&gic.lock);
>> + gic_hw_ops->enable_irq(irq);
>> + spin_unlock(&gic_lock);
>> spin_unlock_irqrestore(&desc->lock, flags);
>> }
>>
>> @@ -166,11 +122,11 @@ static void gic_irq_disable(struct irq_desc *desc)
>> unsigned long flags;
>>
>> spin_lock_irqsave(&desc->lock, flags);
>> - spin_lock(&gic.lock);
>> + spin_lock(&gic_lock);
>> /* Disable routing */
>> - GICD[GICD_ICENABLER + irq / 32] = (1u << (irq % 32));
>> + gic_hw_ops->disable_irq(irq);
>> desc->status |= IRQ_DISABLED;
>> - spin_unlock(&gic.lock);
>> + spin_unlock(&gic_lock);
>> spin_unlock_irqrestore(&desc->lock, flags);
>> }
>>
>> @@ -194,17 +150,16 @@ static void gic_host_irq_end(struct irq_desc *desc)
>> {
>> int irq = desc->irq;
>> /* Lower the priority */
>> - GICC[GICC_EOIR] = irq;
>> - /* Deactivate */
>> - GICC[GICC_DIR] = irq;
>> + gic_hw_ops->eoi_irq(irq);
>> + gic_hw_ops->deactivate_irq(irq);
>> }
>>
>> static void gic_guest_irq_end(struct irq_desc *desc)
>> {
>> int irq = desc->irq;
>> /* Lower the priority of the IRQ */
>> - GICC[GICC_EOIR] = irq;
>> /* Deactivation happens in maintenance interrupt / via GICV */
>> + gic_hw_ops->eoi_irq(irq);
>> }
>>
>> static void gic_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
>> @@ -243,27 +198,7 @@ static void gic_set_irq_properties(unsigned int irq,
>> bool_t level,
>> const cpumask_t *cpu_mask,
>> unsigned int priority)
>> {
>> - volatile unsigned char *bytereg;
>> - uint32_t cfg, edgebit;
>> - unsigned int mask = gic_cpu_mask(cpu_mask);
>> -
>> - /* Set edge / level */
>> - cfg = GICD[GICD_ICFGR + irq / 16];
>> - edgebit = 2u << (2 * (irq % 16));
>> - if ( level )
>> - cfg &= ~edgebit;
>> - else
>> - cfg |= edgebit;
>> - GICD[GICD_ICFGR + irq / 16] = cfg;
>> -
>> - /* Set target CPU mask (RAZ/WI on uniprocessor) */
>> - bytereg = (unsigned char *) (GICD + GICD_ITARGETSR);
>> - bytereg[irq] = mask;
>> -
>> - /* Set priority */
>> - bytereg = (unsigned char *) (GICD + GICD_IPRIORITYR);
>> - bytereg[irq] = priority;
>> -
>> + return gic_hw_ops->set_irq_property(irq, level, cpu_mask, priority);
>> }
>>
>> /* Program the GIC to route an interrupt */
>> @@ -273,8 +208,8 @@ static int gic_route_irq(unsigned int irq, bool_t level,
>> struct irq_desc *desc = irq_to_desc(irq);
>> unsigned long flags;
>>
>> - ASSERT(priority <= 0xff); /* Only 8 bits of priority */
>> - ASSERT(irq < gic.lines); /* Can't route interrupts that don't
>> exist */
>> + ASSERT(priority <= 0xff); /* Only 8 bits of priority */
>> + ASSERT(irq < gic_number_lines());
>>
>> if ( desc->action != NULL )
>> return -EBUSY;
>> @@ -286,9 +221,9 @@ static int gic_route_irq(unsigned int irq, bool_t level,
>>
>> desc->handler = &gic_host_irq_type;
>>
>> - spin_lock(&gic.lock);
>> + spin_lock(&gic_lock);
>> gic_set_irq_properties(irq, level, cpu_mask, priority);
>> - spin_unlock(&gic.lock);
>> + spin_unlock(&gic_lock);
>>
>> spin_unlock_irqrestore(&desc->lock, flags);
>> return 0;
>> @@ -305,119 +240,6 @@ void gic_route_dt_irq(const struct dt_irq *irq, const
>> cpumask_t *cpu_mask,
>> gic_route_irq(irq->irq, level, cpu_mask, priority);
>> }
>>
>> -static void __init gic_dist_init(void)
>> -{
>> - uint32_t type;
>> - uint32_t cpumask;
>> - int i;
>> -
>> - cpumask = GICD[GICD_ITARGETSR] & 0xff;
>> - cpumask |= cpumask << 8;
>> - cpumask |= cpumask << 16;
>> -
>> - /* Disable the distributor */
>> - GICD[GICD_CTLR] = 0;
>> -
>> - type = GICD[GICD_TYPER];
>> - gic.lines = 32 * ((type & GICD_TYPE_LINES) + 1);
>> - gic.cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
>> - printk("GIC: %d lines, %d cpu%s%s (IID %8.8x).\n",
>> - gic.lines, gic.cpus, (gic.cpus == 1) ? "" : "s",
>> - (type & GICD_TYPE_SEC) ? ", secure" : "",
>> - GICD[GICD_IIDR]);
>> -
>> - /* Default all global IRQs to level, active low */
>> - for ( i = 32; i < gic.lines; i += 16 )
>> - GICD[GICD_ICFGR + i / 16] = 0x0;
>> -
>> - /* Route all global IRQs to this CPU */
>> - for ( i = 32; i < gic.lines; i += 4 )
>> - GICD[GICD_ITARGETSR + i / 4] = cpumask;
>> -
>> - /* Default priority for global interrupts */
>> - for ( i = 32; i < gic.lines; i += 4 )
>> - GICD[GICD_IPRIORITYR + i / 4] =
>> - GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 |
>> GIC_PRI_IRQ;
>> -
>> - /* Disable all global interrupts */
>> - for ( i = 32; i < gic.lines; i += 32 )
>> - GICD[GICD_ICENABLER + i / 32] = (uint32_t)~0ul;
>> -
>> - /* Turn on the distributor */
>> - GICD[GICD_CTLR] = GICD_CTL_ENABLE;
>> -}
>> -
>> -static void __cpuinit gic_cpu_init(void)
>> -{
>> - int i;
>> -
>> - this_cpu(gic_cpu_id) = GICD[GICD_ITARGETSR] & 0xff;
>> -
>> - /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
>> - * even though they are controlled with GICD registers, they must
>> - * be set up here with the other per-cpu state. */
>> - GICD[GICD_ICENABLER] = 0xffff0000; /* Disable all PPI */
>> - GICD[GICD_ISENABLER] = 0x0000ffff; /* Enable all SGI */
>> - /* Set SGI priorities */
>> - for (i = 0; i < 16; i += 4)
>> - GICD[GICD_IPRIORITYR + i / 4] =
>> - GIC_PRI_IPI<<24 | GIC_PRI_IPI<<16 | GIC_PRI_IPI<<8 |
>> GIC_PRI_IPI;
>> - /* Set PPI priorities */
>> - for (i = 16; i < 32; i += 4)
>> - GICD[GICD_IPRIORITYR + i / 4] =
>> - GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 |
>> GIC_PRI_IRQ;
>> -
>> - /* Local settings: interface controller */
>> - GICC[GICC_PMR] = 0xff; /* Don't mask by priority */
>> - GICC[GICC_BPR] = 0; /* Finest granularity of priority
>> */
>> - GICC[GICC_CTLR] = GICC_CTL_ENABLE|GICC_CTL_EOI; /* Turn on delivery
>> */
>> -}
>> -
>> -static void gic_cpu_disable(void)
>> -{
>> - GICC[GICC_CTLR] = 0;
>> -}
>> -
>> -static void __cpuinit gic_hyp_init(void)
>> -{
>> - uint32_t vtr;
>> -
>> - vtr = GICH[GICH_VTR];
>> - nr_lrs = (vtr & GICH_VTR_NRLRGS) + 1;
>> -
>> - GICH[GICH_MISR] = GICH_MISR_EOI;
>> - this_cpu(lr_mask) = 0ULL;
>> -}
>> -
>> -static void __cpuinit gic_hyp_disable(void)
>> -{
>> - GICH[GICH_HCR] = 0;
>> -}
>> -
>> -/* Set up the per-CPU parts of the GIC for a secondary CPU */
>> -static int __cpuinit gic_init_secondary_cpu(struct notifier_block *nfb,
>> - unsigned long action, void *hcpu)
>> -{
>> - if (action == CPU_STARTING)
>> - {
>> - spin_lock(&gic.lock);
>> - gic_cpu_init();
>> - gic_hyp_init();
>> - spin_unlock(&gic.lock);
>> - }
>> - return NOTIFY_DONE;
>> -}
>> -
>> -static struct notifier_block gic_cpu_nb = {
>> - .notifier_call = gic_init_secondary_cpu,
>> - .priority = 100
>> -};
>> -
>> -static void gic_smp_init(void)
>> -{
>> - register_cpu_notifier(&gic_cpu_nb);
>> -}
>> -
>> int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
>> unsigned int *out_hwirq,
>> unsigned int *out_type)
>> @@ -438,124 +260,25 @@ int gic_irq_xlate(const u32 *intspec, unsigned int
>> intsize,
>> return 0;
>> }
>>
>> -/* Set up the GIC */
>> -void __init gic_init(void)
>> -{
>> - static const struct dt_device_match gic_ids[] __initconst =
>> - {
>> - DT_MATCH_GIC,
>> - { /* sentinel */ },
>> - };
>> - struct dt_device_node *node;
>> - int res;
>> -
>> - node = dt_find_interrupt_controller(gic_ids);
>> - if ( !node )
>> - panic("Unable to find compatible GIC in the device tree");
>> -
>> - dt_device_set_used_by(node, DOMID_XEN);
>> -
>> - res = dt_device_get_address(node, 0, &gic.dbase, NULL);
>> - if ( res || !gic.dbase || (gic.dbase & ~PAGE_MASK) )
>> - panic("GIC: Cannot find a valid address for the distributor");
>> -
>> - res = dt_device_get_address(node, 1, &gic.cbase, NULL);
>> - if ( res || !gic.cbase || (gic.cbase & ~PAGE_MASK) )
>> - panic("GIC: Cannot find a valid address for the CPU");
>> -
>> - res = dt_device_get_address(node, 2, &gic.hbase, NULL);
>> - if ( res || !gic.hbase || (gic.hbase & ~PAGE_MASK) )
>> - panic("GIC: Cannot find a valid address for the hypervisor");
>> -
>> - res = dt_device_get_address(node, 3, &gic.vbase, NULL);
>> - if ( res || !gic.vbase || (gic.vbase & ~PAGE_MASK) )
>> - panic("GIC: Cannot find a valid address for the virtual CPU");
>> -
>> - res = dt_device_get_irq(node, 0, &gic.maintenance);
>> - if ( res )
>> - panic("GIC: Cannot find the maintenance IRQ");
>> -
>> - /* Set the GIC as the primary interrupt controller */
>> - dt_interrupt_controller = node;
>> -
>> - /* TODO: Add check on distributor, cpu size */
>> -
>> - printk("GIC initialization:\n"
>> - " gic_dist_addr=%"PRIpaddr"\n"
>> - " gic_cpu_addr=%"PRIpaddr"\n"
>> - " gic_hyp_addr=%"PRIpaddr"\n"
>> - " gic_vcpu_addr=%"PRIpaddr"\n"
>> - " gic_maintenance_irq=%u\n",
>> - gic.dbase, gic.cbase, gic.hbase, gic.vbase,
>> - gic.maintenance.irq);
>> -
>> - if ( (gic.dbase & ~PAGE_MASK) || (gic.cbase & ~PAGE_MASK) ||
>> - (gic.hbase & ~PAGE_MASK) || (gic.vbase & ~PAGE_MASK) )
>> - panic("GIC interfaces not page aligned");
>> -
>> - set_fixmap(FIXMAP_GICD, gic.dbase >> PAGE_SHIFT, DEV_SHARED);
>> - BUILD_BUG_ON(FIXMAP_ADDR(FIXMAP_GICC1) !=
>> - FIXMAP_ADDR(FIXMAP_GICC2)-PAGE_SIZE);
>> - set_fixmap(FIXMAP_GICC1, gic.cbase >> PAGE_SHIFT, DEV_SHARED);
>> - if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
>> - set_fixmap(FIXMAP_GICC2, (gic.cbase >> PAGE_SHIFT) + 0x10,
>> DEV_SHARED);
>> - else
>> - set_fixmap(FIXMAP_GICC2, (gic.cbase >> PAGE_SHIFT) + 0x1,
>> DEV_SHARED);
>> - set_fixmap(FIXMAP_GICH, gic.hbase >> PAGE_SHIFT, DEV_SHARED);
>> -
>> - /* Global settings: interrupt distributor */
>> - spin_lock_init(&gic.lock);
>> - spin_lock(&gic.lock);
>> -
>> - gic_smp_init();
>> - gic_dist_init();
>> - gic_cpu_init();
>> - gic_hyp_init();
>> -
>> - spin_unlock(&gic.lock);
>> -}
>> -
>> void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
>> {
>> - unsigned int mask = 0;
>> - cpumask_t online_mask;
>> -
>> - ASSERT(sgi < 16); /* There are only 16 SGIs */
>> -
>> - cpumask_and(&online_mask, cpumask, &cpu_online_map);
>> - mask = gic_cpu_mask(&online_mask);
>> -
>> - dsb();
>> -
>> - GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST
>> - | (mask<<GICD_SGI_TARGET_SHIFT)
>> - | sgi;
>> + gic_hw_ops->send_sgi(cpumask, sgi);
>> }
>>
>> -void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
>> +static void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
>> {
>> - ASSERT(cpu < NR_GIC_CPU_IF); /* Targets bitmap only supports 8 CPUs */
>> send_SGI_mask(cpumask_of(cpu), sgi);
>> }
>>
>> -void send_SGI_self(enum gic_sgi sgi)
>> -{
>> - ASSERT(sgi < 16); /* There are only 16 SGIs */
>> -
>> - dsb();
>> -
>> - GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF
>> - | sgi;
>> -}
>> -
>> void send_SGI_allbutself(enum gic_sgi sgi)
>> {
>> - ASSERT(sgi < 16); /* There are only 16 SGIs */
>> + cpumask_t all_others_mask;
>> + ASSERT(sgi < 16); /* There are only 16 SGIs */
>>
>> - dsb();
>> + cpumask_andnot(&all_others_mask, &cpu_possible_map, cpumask_of(smp_processor_id()));
>>
>> - GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS
>> - | sgi;
>> + dsb();
>> + send_SGI_mask(&all_others_mask, sgi);
>> }
>>
>> void smp_send_state_dump(unsigned int cpu)
>> @@ -568,16 +291,15 @@ void gic_disable_cpu(void)
>> {
>> ASSERT(!local_irq_is_enabled());
>>
>> - spin_lock(&gic.lock);
>> - gic_cpu_disable();
>> - gic_hyp_disable();
>> - spin_unlock(&gic.lock);
>> + spin_lock(&gic_lock);
>> + gic_hw_ops->disable_interface();
>> + spin_unlock(&gic_lock);
>> }
>>
>> void gic_route_ppis(void)
>> {
>> /* GIC maintenance */
>> - gic_route_dt_irq(&gic.maintenance, cpumask_of(smp_processor_id()),
>> + gic_route_dt_irq(gic_hw_ops->get_maintenance_irq(), cpumask_of(smp_processor_id()),
>> GIC_PRI_IRQ);
>> /* Route timer interrupt */
>> route_timer_interrupt();
>> @@ -652,20 +374,10 @@ int __init setup_dt_irq(const struct dt_irq *irq,
>> struct irqaction *new)
>> return rc;
>> }
>>
>> -static inline void gic_set_lr(int lr, struct pending_irq *p,
>> +static void gic_set_lr(int lr, struct pending_irq *p,
>> unsigned int state)
>> {
>> - int maintenance_int = GICH_LR_MAINTENANCE_IRQ;
>> -
>> - BUG_ON(lr >= nr_lrs);
>> - BUG_ON(lr < 0);
>> - BUG_ON(state & ~(GICH_LR_STATE_MASK<<GICH_LR_STATE_SHIFT));
>> -
>> - GICH[GICH_LR + lr] = state |
>> - maintenance_int |
>> - ((p->priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
>> - ((p->irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
>> -
>> + gic_hw_ops->update_lr(lr, p->irq, state, p->priority);
>> set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
>> clear_bit(GIC_IRQ_GUEST_PENDING, &p->status);
>> }
>> @@ -693,10 +405,10 @@ void gic_remove_from_queues(struct vcpu *v, unsigned
>> int virtual_irq)
>> struct pending_irq *p = irq_to_pending(v, virtual_irq);
>> unsigned long flags;
>>
>> - spin_lock_irqsave(&gic.lock, flags);
>> + spin_lock_irqsave(&gic_lock, flags);
>> if ( !list_empty(&p->lr_queue) )
>> list_del_init(&p->lr_queue);
>> - spin_unlock_irqrestore(&gic.lock, flags);
>> + spin_unlock_irqrestore(&gic_lock, flags);
>> }
>>
>> void gic_set_guest_irq(struct vcpu *v, unsigned int virtual_irq,
>> @@ -704,9 +416,9 @@ void gic_set_guest_irq(struct vcpu *v, unsigned int
>> virtual_irq,
>> {
>> int i;
>> unsigned long flags;
>> + unsigned int nr_lrs = gic_hw_ops->nr_lrs();
>>
>> - spin_lock_irqsave(&gic.lock, flags);
>> -
>> + spin_lock_irqsave(&gic_lock, flags);
>> if ( v == current && list_empty(&v->arch.vgic.lr_pending) )
>> {
>> i = find_first_zero_bit(&this_cpu(lr_mask), nr_lrs);
>> @@ -720,7 +432,7 @@ void gic_set_guest_irq(struct vcpu *v, unsigned int
>> virtual_irq,
>> gic_add_to_lr_pending(v, irq_to_pending(v, virtual_irq));
>>
>> out:
>> - spin_unlock_irqrestore(&gic.lock, flags);
>> + spin_unlock_irqrestore(&gic_lock, flags);
>> return;
>> }
>>
>> @@ -729,17 +441,18 @@ static void gic_restore_pending_irqs(struct vcpu *v)
>> int i;
>> struct pending_irq *p, *t;
>> unsigned long flags;
>> + unsigned int nr_lrs = gic_hw_ops->nr_lrs();
>>
>> list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
>> {
>> i = find_first_zero_bit(&this_cpu(lr_mask), nr_lrs);
>> if ( i >= nr_lrs ) return;
>>
>> - spin_lock_irqsave(&gic.lock, flags);
>> + spin_lock_irqsave(&gic_lock, flags);
>> gic_set_lr(i, p, GICH_LR_PENDING);
>> list_del_init(&p->lr_queue);
>> set_bit(i, &this_cpu(lr_mask));
>> - spin_unlock_irqrestore(&gic.lock, flags);
>> + spin_unlock_irqrestore(&gic_lock, flags);
>> }
>>
>> }
>> @@ -749,11 +462,11 @@ void gic_clear_pending_irqs(struct vcpu *v)
>> struct pending_irq *p, *t;
>> unsigned long flags;
>>
>> - spin_lock_irqsave(&gic.lock, flags);
>> + spin_lock_irqsave(&gic_lock, flags);
>> v->arch.lr_mask = 0;
>> list_for_each_entry_safe ( p, t, &v->arch.vgic.lr_pending, lr_queue )
>> list_del_init(&p->lr_queue);
>> - spin_unlock_irqrestore(&gic.lock, flags);
>> + spin_unlock_irqrestore(&gic_lock, flags);
>> }
>>
>> static void gic_inject_irq_start(void)
>> @@ -809,7 +522,7 @@ int gic_route_irq_to_guest(struct domain *d, const
>> struct dt_irq *irq,
>> action->free_on_release = 1;
>>
>> spin_lock_irqsave(&desc->lock, flags);
>> - spin_lock(&gic.lock);
>> + spin_lock(&gic_lock);
>>
>> desc->handler = &gic_guest_irq_type;
>> desc->status |= IRQ_GUEST;
>> @@ -830,15 +543,15 @@ int gic_route_irq_to_guest(struct domain *d, const
>> struct dt_irq *irq,
>> p->desc = desc;
>>
>> out:
>> - spin_unlock(&gic.lock);
>> + spin_unlock(&gic_lock);
>> spin_unlock_irqrestore(&desc->lock, flags);
>> return retval;
>> }
>>
>> -static void do_sgi(struct cpu_user_regs *regs, int othercpu, enum gic_sgi
>> sgi)
>> +static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
>> {
>> /* Lower the priority */
>> - GICC[GICC_EOIR] = sgi;
>> + gic_hw_ops->eoi_irq(sgi);
>>
>> switch (sgi)
>> {
>> @@ -857,20 +570,17 @@ static void do_sgi(struct cpu_user_regs *regs, int
>> othercpu, enum gic_sgi sgi)
>> }
>>
>> /* Deactivate */
>> - GICC[GICC_DIR] = sgi;
>> + gic_hw_ops->deactivate_irq(sgi);
>> }
>>
>> /* Accept an interrupt from the GIC and dispatch its handler */
>> void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
>> {
>> - uint32_t intack;
>> unsigned int irq;
>>
>>
>> do {
>> - intack = GICC[GICC_IAR];
>> - irq = intack & GICC_IA_IRQ;
>> -
>> + irq = gic_hw_ops->ack_irq();
>> if ( likely(irq >= 16 && irq < 1021) )
>> {
>> local_irq_enable();
>> @@ -879,8 +589,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int
>> is_fiq)
>> }
>> else if (unlikely(irq < 16))
>> {
>> - unsigned int cpu = (intack & GICC_IA_CPU_MASK) >>
>> GICC_IA_CPU_SHIFT;
>> - do_sgi(regs, cpu, irq);
>> + do_sgi(regs, irq);
>> }
>> else
>> {
>> @@ -892,72 +601,31 @@ void gic_interrupt(struct cpu_user_regs *regs, int
>> is_fiq)
>>
>> int vcpu_gic_init(struct vcpu *v)
>> {
>> - v->arch.gic_state = xzalloc(struct gic_state_data);
>> - if(!v->arch.gic_state)
>> - return -ENOMEM;
>> - return 0;
>> + return gic_hw_ops->state_init(v);
>> }
>>
>> int gicv_setup(struct domain *d)
>> {
>> int ret;
>>
>> - /*
>> - * Domain 0 gets the hardware address.
>> - * Guests get the virtual platform layout.
>> - */
>> - if ( d->domain_id == 0 )
>> - {
>> - d->arch.vgic.dbase = gic.dbase;
>> - d->arch.vgic.cbase = gic.cbase;
>> - }
>> - else
>> - {
>> - d->arch.vgic.dbase = GUEST_GICD_BASE;
>> - d->arch.vgic.cbase = GUEST_GICC_BASE;
>> - }
>> -
>> - d->arch.vgic.nr_lines = 0;
>> -
>> - /*
>> - * Map the gic virtual cpu interface in the gic cpu interface
>> - * region of the guest.
>> - *
>> - * The second page is always mapped at +4K irrespective of the
>> - * GIC_64K_STRIDE quirk. The DTB passed to the guest reflects this.
>> - */
>> - ret = map_mmio_regions(d, d->arch.vgic.cbase,
>> - d->arch.vgic.cbase + PAGE_SIZE - 1,
>> - gic.vbase);
>> - if (ret)
>> - return ret;
>> -
>> - if ( !platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
>> - ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
>> - d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
>> - gic.vbase + PAGE_SIZE);
>> - else
>> - ret = map_mmio_regions(d, d->arch.vgic.cbase + PAGE_SIZE,
>> - d->arch.vgic.cbase + (2 * PAGE_SIZE) - 1,
>> - gic.vbase + 16*PAGE_SIZE);
>> -
>> + ret = gic_hw_ops->gicv_setup(d);
>> return ret;
>>
>> }
>>
>> static void gic_irq_eoi(void *info)
>> -{
>> +{
>> int virq = (uintptr_t) info;
>> - GICC[GICC_DIR] = virq;
>> + gic_hw_ops->deactivate_irq(virq);
>> }
>>
>> static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
>> {
>> int i = 0, virq, pirq = -1;
>> - uint32_t lr;
>> struct vcpu *v = current;
>> - uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) <<
>> 32);
>> + unsigned long eisr = 0;
>>
>> + eisr = gic_hw_ops->read_eisr();
>> while ((i = find_next_bit((const long unsigned int *) &eisr,
>> 64, i)) < 64) {
>> struct pending_irq *p, *p2;
>> @@ -967,10 +635,9 @@ static void maintenance_interrupt(int irq, void
>> *dev_id, struct cpu_user_regs *r
>> cpu = -1;
>> inflight = 0;
>>
>> - spin_lock_irq(&gic.lock);
>> - lr = GICH[GICH_LR + i];
>> - virq = lr & GICH_LR_VIRTUAL_MASK;
>> - GICH[GICH_LR + i] = 0;
>> + spin_lock_irq(&gic_lock);
>> + virq = gic_hw_ops->update_lr_for_mi(i);
>> +
>> clear_bit(i, &this_cpu(lr_mask));
>>
>> p = irq_to_pending(v, virq);
>> @@ -995,7 +662,7 @@ static void maintenance_interrupt(int irq, void *dev_id,
>> struct cpu_user_regs *r
>> list_del_init(&p2->lr_queue);
>> set_bit(i, &this_cpu(lr_mask));
>> }
>> - spin_unlock_irq(&gic.lock);
>> + spin_unlock_irq(&gic_lock);
>>
>> if ( !inflight )
>> {
>> @@ -1018,22 +685,37 @@ static void maintenance_interrupt(int irq, void
>> *dev_id, struct cpu_user_regs *r
>> }
>> }
>>
>> +/* Set up the GIC */
>> +void __init gic_init(void)
>> +{
>> + static const struct dt_device_match gicv2_ids[] __initconst =
>> + {
>> + DT_MATCH_GIC,
>> + { /* sentinel */ },
>> + };
>> + struct dt_device_node *node;
>> +
>> + spin_lock_init(&gic_lock);
>> +
>> + spin_lock(&gic_lock);
>> + node = dt_find_interrupt_controller(gicv2_ids);
>> + if ( node )
>> + {
>> + gicv2_init();
>> + spin_unlock(&gic_lock);
>> + return;
>> + }
>> + spin_unlock(&gic_lock);
>> + if ( !node )
>> + panic("Unable to find compatible GIC in the device tree");
>> +}
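
The new gic_init() above only probes for the v2 binding and calls
gicv2_init(), which registers the v2 gic_hw_operations. The point of the
split is that a future driver slots in next to it. A rough standalone
sketch of that flow (the GICv3 branch and its compatible string are
hypothetical here; nothing in this series defines them):

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the real init functions; each would register its own ops. */
    static void example_gicv2_init(void) { puts("v2 ops registered"); }
    static void example_gicv3_init(void) { puts("v3 ops registered"); }

    int main(void)
    {
        const char *dt_compat = "arm,cortex-a15-gic";   /* pretend device tree node */

        if (strcmp(dt_compat, "arm,cortex-a15-gic") == 0)
            example_gicv2_init();
        else if (strcmp(dt_compat, "arm,gic-v3") == 0)  /* hypothetical future branch */
            example_gicv3_init();
        else
            puts("Unable to find compatible GIC");

        return 0;
    }
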
>> +
>> void gic_dump_info(struct vcpu *v)
>> {
>> - int i;
>> struct pending_irq *p;
>> - struct gic_state_data *d;
>> - d = (struct gic_state_data *)v->arch.gic_state;
>>
>> printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id,
>> v->arch.lr_mask);
>> - if ( v == current )
>> - {
>> - for ( i = 0; i < nr_lrs; i++ )
>> - printk(" HW_LR[%d]=%x\n", i, GICH[GICH_LR + i]);
>> - } else {
>> - for ( i = 0; i < nr_lrs; i++ )
>> - printk(" VCPU_LR[%d]=%x\n", i, d->gic_lr[i]);
>> - }
>> + gic_hw_ops->dump_state(v);
>>
>> list_for_each_entry ( p, &v->arch.vgic.inflight_irqs, inflight )
>> {
>> @@ -1049,7 +731,7 @@ void gic_dump_info(struct vcpu *v)
>>
>> void __cpuinit init_maintenance_interrupt(void)
>> {
>> - request_dt_irq(&gic.maintenance, maintenance_interrupt,
>> + request_dt_irq(gic_hw_ops->get_maintenance_irq(), maintenance_interrupt,
>> "irq-maintenance", NULL);
>> }
>>
>> diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
>> index 18656fd..4244491 100644
>> --- a/xen/include/asm-arm/gic.h
>> +++ b/xen/include/asm-arm/gic.h
>> @@ -39,6 +39,8 @@
>> #define GIC_PRI_IPI 0x90 /* IPIs must preempt normal interrupts */
>> #define GIC_PRI_HIGHEST 0x80 /* Higher priorities belong to Secure-World */
>>
>> +#define GICH_LR_PENDING 1
>> +#define GICH_LR_ACTIVE 2
>>
>> #ifndef __ASSEMBLY__
>> #include <xen/device_tree.h>
>> @@ -46,12 +48,15 @@
>> #define DT_MATCH_GIC DT_MATCH_COMPATIBLE("arm,cortex-a15-gic"), \
>> DT_MATCH_COMPATIBLE("arm,cortex-a7-gic")
>>
>> +extern int gic_hw_version(void);
>> extern int domain_vgic_init(struct domain *d);
>> extern void domain_vgic_free(struct domain *d);
>>
>> extern int vcpu_vgic_init(struct vcpu *v);
>> extern int vcpu_gic_init(struct vcpu *v);
>>
>> +extern void gicv2_init(void);
>> +
>> extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq,int virtual);
>> extern void vgic_clear_pending_irqs(struct vcpu *v);
>> extern struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq);
>> @@ -88,14 +93,47 @@ extern int gicv_setup(struct domain *d);
>> extern void gic_save_state(struct vcpu *v);
>> extern void gic_restore_state(struct vcpu *v);
>>
>> +#define GIC_VERSION_V2 0x2
>> +
>> /* SGI (AKA IPIs) */
>> enum gic_sgi {
>> GIC_SGI_EVENT_CHECK = 0,
>> GIC_SGI_DUMP_STATE = 1,
>> GIC_SGI_CALL_FUNCTION = 2,
>> };
>> +
>> +struct gic_hw_operations {
>> + int (*gic_type)(void);
>> + struct dt_irq * (*get_maintenance_irq)(void);
>> + unsigned int (*nr_lines)(void);
>> + unsigned int (*nr_lrs)(void);
>> + int (*state_init)(struct vcpu *);
>> + void (*save_state)(struct vcpu *);
>> + void (*restore_state)(struct vcpu *);
>> + void (*dump_state)(struct vcpu *);
>> + int (*gicv_setup)(struct domain *);
>> + void (*enable_irq)(int);
>> + void (*disable_irq)(int);
>> + void (*eoi_irq)(int);
>> + void (*deactivate_irq)(int);
>> + unsigned int (*ack_irq)(void);
>> + void (*set_irq_property)(unsigned int irq, bool_t level,
>> + const cpumask_t *cpu_mask,
>> + unsigned int priority);
>> + void (*send_sgi)(const cpumask_t *, enum gic_sgi);
>> + void (*disable_interface)(void);
>> + void (*update_lr)(int lr, unsigned int virtual_irq,
>> + unsigned int state, unsigned int priority);
>> + unsigned int (*update_lr_for_mi)(int lr);
>> + unsigned long (*read_eisr)(void);
>> + unsigned long (*read_cpu_rbase)(void);
>> + unsigned long (*read_cpu_sgi_rbase)(void);
>> +};
>> +
>> +extern void register_gic_ops(struct gic_hw_operations *);
>> +
>> +extern void update_cpu_lr_mask(void);
>> extern void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi);
>> -extern void send_SGI_one(unsigned int cpu, enum gic_sgi sgi);
>> extern void send_SGI_self(enum gic_sgi sgi);
>> extern void send_SGI_allbutself(enum gic_sgi sgi);
>>
>> diff --git a/xen/include/asm-arm/gic_v2_defs.h b/xen/include/asm-arm/gic_v2_defs.h
>> index 2366685..5ddd48a 100644
>> --- a/xen/include/asm-arm/gic_v2_defs.h
>> +++ b/xen/include/asm-arm/gic_v2_defs.h
>> @@ -119,8 +119,6 @@
>> #define GICH_LR_STATE_SHIFT 28
>> #define GICH_LR_PRIORITY_SHIFT 23
>> #define GICH_LR_MAINTENANCE_IRQ (1<<19)
>> -#define GICH_LR_PENDING (1<<28)
>> -#define GICH_LR_ACTIVE (1<<29)
>> #define GICH_LR_GRP1 (1<<30)
>> #define GICH_LR_HW (1<<31)
>> #define GICH_LR_CPUID_SHIFT 9
>> --
>> 1.7.9.5
>>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel