[Xen-devel] [PATCH v4 05/16] xen/arm: use ioremap to map gic-v2 registers
From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
gic-v2 driver uses the fixmap to map its registers.
Instead, use ioremap to access the MMIO registers.
With this patch, the gic-v2 register definitions are updated
to use absolute byte offsets instead of register offsets
divided by 4.
The vgic driver logic is updated to compute with absolute
register offsets as well.
Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
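For reference only (not part of the diff below), a minimal sketch of the
access-pattern change, assuming the ioremap_nocache()/readl_relaxed()/
writel_relaxed() helpers used elsewhere in this series; the example_* names
are illustrative:

    /*
     * With the fixmap the registers were indexed as a uint32_t array, so
     * offsets were pre-divided by 4; with ioremap the base is a
     * void __iomem * and absolute byte offsets are used.
     */
    #include <asm/io.h>

    #define EXAMPLE_GICD_ISENABLER  0x100      /* absolute byte offset */

    static void __iomem *example_gicd_base;

    static int example_gicd_map(paddr_t dbase)
    {
        example_gicd_base = ioremap_nocache(dbase, PAGE_SIZE);
        if ( !example_gicd_base )
            return -ENOMEM;
        return 0;
    }

    static void example_gicd_enable_irq(unsigned int irq)
    {
        /* Word registers are 4 bytes apart, hence the explicit "* 4". */
        writel_relaxed(1u << (irq % 32),
                       example_gicd_base + EXAMPLE_GICD_ISENABLER +
                       (irq / 32) * 4);
    }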
xen/arch/arm/gic.c | 150 ++++++++++++++++++++++++---------------------
xen/arch/arm/vgic.c | 17 ++---
xen/include/asm-arm/gic.h | 104 +++++++++++++++----------------
3 files changed, 142 insertions(+), 129 deletions(-)
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index ce2269f..470e6c0 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -31,20 +31,23 @@
#include <asm/p2m.h>
#include <asm/domain.h>
#include <asm/platform.h>
-
+#include <asm/io.h>
#include <asm/gic.h>
-/* Access to the GIC Distributor registers through the fixmap */
-#define GICD ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICD))
-#define GICC ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICC1))
-#define GICH ((volatile uint32_t *) FIXMAP_ADDR(FIXMAP_GICH))
+#define GICD (gic.map_dbase)
+#define GICC (gic.map_cbase)
+#define GICH (gic.map_hbase)
+
static void gic_restore_pending_irqs(struct vcpu *v);
/* Global state */
static struct {
paddr_t dbase; /* Address of distributor registers */
+ void __iomem * map_dbase; /* IO mapped Address of distributor registers */
paddr_t cbase; /* Address of CPU interface registers */
+ void __iomem * map_cbase; /* IO mapped Address of CPU interface registers*/
paddr_t hbase; /* Address of virtual interface registers */
+ void __iomem * map_hbase; /* IO mapped Address of virtual interface registers */
paddr_t vbase; /* Address of virtual cpu interface registers */
unsigned int lines; /* Number of interrupts (SPIs + PPIs + SGIs) */
unsigned int maintenance_irq; /* IRQ maintenance */
@@ -101,12 +104,12 @@ void gic_save_state(struct vcpu *v)
* accessed simultaneously by another pCPU.
*/
for ( i=0; i<nr_lrs; i++)
- v->arch.gic_lr[i] = GICH[GICH_LR + i];
+ v->arch.gic_lr[i] = readl_relaxed(GICH + GICH_LR + i * 4);
v->arch.lr_mask = this_cpu(lr_mask);
- v->arch.gic_apr = GICH[GICH_APR];
- v->arch.gic_vmcr = GICH[GICH_VMCR];
+ v->arch.gic_apr = readl_relaxed(GICH + GICH_APR);
+ v->arch.gic_vmcr = readl_relaxed(GICH + GICH_VMCR);
/* Disable until next VCPU scheduled */
- GICH[GICH_HCR] = 0;
+ writel_relaxed(0, GICH + GICH_HCR);
isb();
}
@@ -120,10 +123,10 @@ void gic_restore_state(struct vcpu *v)
this_cpu(lr_mask) = v->arch.lr_mask;
for ( i=0; i<nr_lrs; i++)
- GICH[GICH_LR + i] = v->arch.gic_lr[i];
- GICH[GICH_APR] = v->arch.gic_apr;
- GICH[GICH_VMCR] = v->arch.gic_vmcr;
- GICH[GICH_HCR] = GICH_HCR_EN;
+ writel_relaxed(v->arch.gic_lr[i], GICH + GICH_LR + i * 4);
+ writel_relaxed(v->arch.gic_apr, GICH + GICH_APR);
+ writel_relaxed(v->arch.gic_vmcr, GICH + GICH_VMCR);
+ writel_relaxed(GICH_HCR_EN, GICH + GICH_HCR);
isb();
gic_restore_pending_irqs(v);
@@ -139,8 +142,7 @@ static void gic_irq_enable(struct irq_desc *desc)
spin_lock_irqsave(&gic.lock, flags);
desc->status &= ~IRQ_DISABLED;
dsb(sy);
- /* Enable routing */
- GICD[GICD_ISENABLER + irq / 32] = (1u << (irq % 32));
+ writel_relaxed((1u << (irq % 32)), GICD + GICD_ISENABLER + (irq / 32) * 4);
spin_unlock_irqrestore(&gic.lock, flags);
}
@@ -153,7 +155,7 @@ static void gic_irq_disable(struct irq_desc *desc)
spin_lock_irqsave(&gic.lock, flags);
/* Disable routing */
- GICD[GICD_ICENABLER + irq / 32] = (1u << (irq % 32));
+ writel_relaxed(1u << (irq % 32), GICD + GICD_ICENABLER + (irq / 32) * 4);
desc->status |= IRQ_DISABLED;
spin_unlock_irqrestore(&gic.lock, flags);
}
@@ -179,16 +181,16 @@ static void gic_host_irq_end(struct irq_desc *desc)
{
int irq = desc->irq;
/* Lower the priority */
- GICC[GICC_EOIR] = irq;
+ writel_relaxed(irq, GICC + GICC_EOIR);
/* Deactivate */
- GICC[GICC_DIR] = irq;
+ writel_relaxed(irq, GICC + GICC_DIR);
}
static void gic_guest_irq_end(struct irq_desc *desc)
{
int irq = desc->irq;
/* Lower the priority of the IRQ */
- GICC[GICC_EOIR] = irq;
+ writel_relaxed(irq, GICC + GICC_EOIR);
/* Deactivation happens in maintenance interrupt / via GICV */
}
@@ -243,13 +245,13 @@ static void gic_set_irq_properties(struct irq_desc *desc,
mask = gic_cpu_mask(cpu_mask);
/* Set edge / level */
- cfg = GICD[GICD_ICFGR + irq / 16];
+ cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
edgebit = 2u << (2 * (irq % 16));
if ( type & DT_IRQ_TYPE_LEVEL_MASK )
cfg &= ~edgebit;
else if ( type & DT_IRQ_TYPE_EDGE_BOTH )
cfg |= edgebit;
- GICD[GICD_ICFGR + irq / 16] = cfg;
+ writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
/* Set target CPU mask (RAZ/WI on uniprocessor) */
bytereg = (unsigned char *) (GICD + GICD_ITARGETSR);
@@ -303,87 +305,91 @@ static void __init gic_dist_init(void)
uint32_t cpumask;
int i;
- cpumask = GICD[GICD_ITARGETSR] & 0xff;
+ cpumask = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
/* Disable the distributor */
- GICD[GICD_CTLR] = 0;
+ writel_relaxed(0, GICD + GICD_CTLR);
- type = GICD[GICD_TYPER];
+ type = readl_relaxed(GICD + GICD_TYPER);
gic.lines = 32 * ((type & GICD_TYPE_LINES) + 1);
gic.cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
printk("GIC: %d lines, %d cpu%s%s (IID %8.8x).\n",
gic.lines, gic.cpus, (gic.cpus == 1) ? "" : "s",
(type & GICD_TYPE_SEC) ? ", secure" : "",
- GICD[GICD_IIDR]);
+ readl_relaxed(GICD + GICD_IIDR));
/* Default all global IRQs to level, active low */
for ( i = 32; i < gic.lines; i += 16 )
- GICD[GICD_ICFGR + i / 16] = 0x0;
+ writel_relaxed(0x0, GICD + GICD_ICFGR + (i / 16) * 4);
/* Route all global IRQs to this CPU */
for ( i = 32; i < gic.lines; i += 4 )
- GICD[GICD_ITARGETSR + i / 4] = cpumask;
+ writel_relaxed(cpumask, GICD + GICD_ITARGETSR + (i / 4) * 4);
/* Default priority for global interrupts */
for ( i = 32; i < gic.lines; i += 4 )
- GICD[GICD_IPRIORITYR + i / 4] =
- GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 | GIC_PRI_IRQ;
+ writel_relaxed(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
+ GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
+ GICD + GICD_IPRIORITYR + (i / 4) * 4);
/* Disable all global interrupts */
for ( i = 32; i < gic.lines; i += 32 )
- GICD[GICD_ICENABLER + i / 32] = (uint32_t)~0ul;
+ writel_relaxed(~0x0, GICD + GICD_ICENABLER + (i / 32) * 4);
/* Turn on the distributor */
- GICD[GICD_CTLR] = GICD_CTL_ENABLE;
+ writel_relaxed(GICD_CTL_ENABLE, GICD + GICD_CTLR);
}
static void __cpuinit gic_cpu_init(void)
{
int i;
- this_cpu(gic_cpu_id) = GICD[GICD_ITARGETSR] & 0xff;
+ this_cpu(gic_cpu_id) = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
/* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
* even though they are controlled with GICD registers, they must
* be set up here with the other per-cpu state. */
- GICD[GICD_ICENABLER] = 0xffff0000; /* Disable all PPI */
- GICD[GICD_ISENABLER] = 0x0000ffff; /* Enable all SGI */
+ writel_relaxed(0xffff0000, GICD + GICD_ICENABLER); /* Disable all PPI */
+ writel_relaxed(0x0000ffff, GICD + GICD_ISENABLER); /* Enable all SGI */
+
/* Set SGI priorities */
for (i = 0; i < 16; i += 4)
- GICD[GICD_IPRIORITYR + i / 4] =
- GIC_PRI_IPI<<24 | GIC_PRI_IPI<<16 | GIC_PRI_IPI<<8 | GIC_PRI_IPI;
+ writel_relaxed(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 |
+ GIC_PRI_IPI << 8 | GIC_PRI_IPI,
+ GICD + GICD_IPRIORITYR + (i / 4) * 4);
/* Set PPI priorities */
for (i = 16; i < 32; i += 4)
- GICD[GICD_IPRIORITYR + i / 4] =
- GIC_PRI_IRQ<<24 | GIC_PRI_IRQ<<16 | GIC_PRI_IRQ<<8 | GIC_PRI_IRQ;
+ writel_relaxed(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
+ GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
+ GICD + GICD_IPRIORITYR + (i / 4) * 4);
/* Local settings: interface controller */
- GICC[GICC_PMR] = 0xff; /* Don't mask by priority */
- GICC[GICC_BPR] = 0; /* Finest granularity of priority */
- GICC[GICC_CTLR] = GICC_CTL_ENABLE|GICC_CTL_EOI; /* Turn on delivery */
+ writel_relaxed(0xff, GICC + GICC_PMR);      /* Don't mask by priority */
+ writel_relaxed(0x0, GICC + GICC_BPR);       /* Finest granularity of priority */
+ writel_relaxed(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC + GICC_CTLR); /* Turn on delivery */
}
static void gic_cpu_disable(void)
{
- GICC[GICC_CTLR] = 0;
+ writel_relaxed(0x0, GICC + GICC_CTLR);
}
static void __cpuinit gic_hyp_init(void)
{
uint32_t vtr;
- vtr = GICH[GICH_VTR];
+ vtr = readl_relaxed(GICH + GICH_VTR);
nr_lrs = (vtr & GICH_VTR_NRLRGS) + 1;
- GICH[GICH_MISR] = GICH_MISR_EOI;
+ writel_relaxed(GICH_MISR_EOI, GICH + GICH_MISR);
this_cpu(lr_mask) = 0ULL;
}
static void __cpuinit gic_hyp_disable(void)
{
- GICH[GICH_HCR] = 0;
+ writel_relaxed(0, GICH + GICH_HCR);
}
int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
@@ -462,15 +468,21 @@ void __init gic_init(void)
(gic.hbase & ~PAGE_MASK) || (gic.vbase & ~PAGE_MASK) )
panic("GIC interfaces not page aligned");
- set_fixmap(FIXMAP_GICD, gic.dbase >> PAGE_SHIFT, DEV_SHARED);
- BUILD_BUG_ON(FIXMAP_ADDR(FIXMAP_GICC1) !=
- FIXMAP_ADDR(FIXMAP_GICC2)-PAGE_SIZE);
- set_fixmap(FIXMAP_GICC1, gic.cbase >> PAGE_SHIFT, DEV_SHARED);
+ gic.map_dbase = ioremap_nocache(gic.dbase, PAGE_SIZE);
+ if ( !gic.map_dbase )
+ panic("Failed to ioremap for GIC distributor\n");
+
if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
- set_fixmap(FIXMAP_GICC2, (gic.cbase >> PAGE_SHIFT) + 0x10, DEV_SHARED);
+ gic.map_cbase = ioremap_nocache(gic.cbase, PAGE_SIZE * 0x10);
else
- set_fixmap(FIXMAP_GICC2, (gic.cbase >> PAGE_SHIFT) + 0x1, DEV_SHARED);
- set_fixmap(FIXMAP_GICH, gic.hbase >> PAGE_SHIFT, DEV_SHARED);
+ gic.map_cbase = ioremap_nocache(gic.cbase, PAGE_SIZE * 2);
+
+ if ( !gic.map_cbase )
+ panic("Failed to ioremap for GIC CPU interface\n");
+
+ gic.map_hbase = ioremap_nocache(gic.hbase, PAGE_SIZE);
+ if ( !gic.map_hbase )
+ panic("Failed to ioremap for GIC Virtual interface\n");
/* Global settings: interrupt distributor */
spin_lock_init(&gic.lock);
@@ -491,15 +503,16 @@ static void send_SGI(const cpumask_t *cpu_mask, enum gic_sgi sgi,
switch ( irqmode )
{
case SGI_TARGET_OTHERS:
- GICD[GICD_SGIR] = GICD_SGI_TARGET_OTHERS | sgi;
+ writel_relaxed(GICD_SGI_TARGET_OTHERS | sgi, GICD + GICD_SGIR);
break;
case SGI_TARGET_SELF:
- GICD[GICD_SGIR] = GICD_SGI_TARGET_SELF | sgi;
+ writel_relaxed(GICD_SGI_TARGET_SELF | sgi, GICD + GICD_SGIR);
break;
case SGI_TARGET_LIST:
mask = gic_cpu_mask(cpu_mask);
- GICD[GICD_SGIR] = GICD_SGI_TARGET_LIST |
- (mask<<GICD_SGI_TARGET_SHIFT) | sgi;
+ writel_relaxed(GICD_SGI_TARGET_LIST |
+ (mask << GICD_SGI_TARGET_SHIFT) | sgi,
+ GICD + GICD_SGIR);
break;
default:
BUG_ON(1);
@@ -585,7 +598,7 @@ static inline void gic_set_lr(int lr, struct pending_irq *p,
if ( p->desc != NULL )
lr_val |= GICH_LR_HW | (p->desc->irq << GICH_LR_PHYSICAL_SHIFT);
- GICH[GICH_LR + lr] = lr_val;
+ writel_relaxed(lr_val, GICH + GICH_LR + lr * 4);
set_bit(GIC_IRQ_GUEST_VISIBLE, &p->status);
clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status);
@@ -677,7 +690,7 @@ static void gic_update_one_lr(struct vcpu *v, int i)
ASSERT(spin_is_locked(&v->arch.vgic.lock));
ASSERT(!local_irq_is_enabled());
- lr = GICH[GICH_LR + i];
+ lr = readl_relaxed(GICH + GICH_LR + i * 4);
irq = (lr >> GICH_LR_VIRTUAL_SHIFT) & GICH_LR_VIRTUAL_MASK;
p = irq_to_pending(v, irq);
if ( lr & GICH_LR_ACTIVE )
@@ -687,7 +700,7 @@ static void gic_update_one_lr(struct vcpu *v, int i)
test_and_clear_bit(GIC_IRQ_GUEST_QUEUED, &p->status) )
{
if ( p->desc == NULL )
- GICH[GICH_LR + i] = lr | GICH_LR_PENDING;
+ writel_relaxed(lr | GICH_LR_PENDING, GICH + GICH_LR + i * 4);
else
gdprintk(XENLOG_WARNING, "unable to inject hw irq=%d into d%dv%d: already active in LR%d\n",
         irq, v->domain->domain_id, v->vcpu_id, i);
@@ -700,7 +713,7 @@ static void gic_update_one_lr(struct vcpu *v, int i)
irq, v->domain->domain_id, v->vcpu_id, i);
#endif
} else {
- GICH[GICH_LR + i] = 0;
+ writel_relaxed(0, GICH + GICH_LR + i * 4);
clear_bit(i, &this_cpu(lr_mask));
if ( p->desc != NULL )
@@ -808,7 +821,7 @@ int gic_events_need_delivery(void)
struct pending_irq *p;
unsigned long flags;
- mask_priority = (GICH[GICH_VMCR] >> GICH_VMCR_PRIORITY_SHIFT) & GICH_VMCR_PRIORITY_MASK;
+ mask_priority = (readl_relaxed(GICH + GICH_VMCR) >> GICH_VMCR_PRIORITY_SHIFT) & GICH_VMCR_PRIORITY_MASK;
mask_priority = mask_priority << 3;
spin_lock_irqsave(&v->arch.vgic.lock, flags);
@@ -844,22 +857,21 @@ int gic_events_need_delivery(void)
void gic_inject(void)
{
+ uint32_t hcr;
ASSERT(!local_irq_is_enabled());
gic_restore_pending_irqs(current);
-
+ hcr = readl_relaxed(GICH + GICH_HCR);
if ( !list_empty(&current->arch.vgic.lr_pending) && lr_all_full() )
- GICH[GICH_HCR] |= GICH_HCR_UIE;
+ writel_relaxed(hcr | GICH_HCR_UIE, GICH + GICH_HCR);
else
- GICH[GICH_HCR] &= ~GICH_HCR_UIE;
-
+ writel_relaxed(hcr & ~GICH_HCR_UIE, GICH + GICH_HCR);
}
static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
{
/* Lower the priority */
- GICC[GICC_EOIR] = sgi;
+ writel_relaxed(sgi, GICC + GICC_EOIR);
switch (sgi)
{
@@ -878,7 +890,7 @@ static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
}
/* Deactivate */
- GICC[GICC_DIR] = sgi;
+ writel_relaxed(sgi, GICC + GICC_DIR);
}
/* Accept an interrupt from the GIC and dispatch its handler */
@@ -889,7 +901,7 @@ void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
do {
- intack = GICC[GICC_IAR];
+ intack = readl_relaxed(GICC + GICC_IAR);
irq = intack & GICC_IA_IRQ;
if ( likely(irq >= 16 && irq < 1021) )
@@ -976,7 +988,7 @@ void gic_dump_info(struct vcpu *v)
if ( v == current )
{
for ( i = 0; i < nr_lrs; i++ )
- printk(" HW_LR[%d]=%x\n", i, GICH[GICH_LR + i]);
+ printk(" HW_LR[%d]=%x\n", i, readl_relaxed(GICH + GICH_LR + i *
4));
} else {
for ( i = 0; i < nr_lrs; i++ )
printk(" VCPU_LR[%d]=%x\n", i, v->arch.gic_lr[i]);
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 151ec3e..b56f9d1 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -30,7 +30,7 @@
#include <asm/mmio.h>
#include <asm/gic.h>
-#define REG(n) (n/4)
+#define REG(n) (n)
/* Number of ranks of interrupt registers for a domain */
#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
@@ -55,7 +55,7 @@ static inline int REG_RANK_NR(int b, uint32_t n)
* Offset of GICD_<FOO><n> with its rank, for GICD_<FOO> with
* <b>-bits-per-interrupt.
*/
-#define REG_RANK_INDEX(b, n) ((n) & ((b)-1))
+#define REG_RANK_INDEX(b, n) (((n) >> 2) & ((b)-1))
/*
* Returns rank corresponding to a GICD_<FOO><n> register for
@@ -63,7 +63,9 @@ static inline int REG_RANK_NR(int b, uint32_t n)
*/
static struct vgic_irq_rank *vgic_irq_rank(struct vcpu *v, int b, int n)
{
- int rank = REG_RANK_NR(b, n);
+ int rank;
+ n = n >> 2;
+ rank = REG_RANK_NR(b, n);
if ( rank == 0 )
return &v->arch.vgic.private_irqs;
@@ -492,7 +494,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
tr = rank->ienable;
rank->ienable |= *r;
vgic_unlock_rank(v, rank);
- vgic_enable_irqs(v, (*r) & (~tr), gicd_reg - GICD_ISENABLER);
+ vgic_enable_irqs(v, (*r) & (~tr), (gicd_reg - GICD_ISENABLER) >> 2);
return 1;
case GICD_ICENABLER ... GICD_ICENABLERN:
@@ -503,7 +505,7 @@ static int vgic_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
tr = rank->ienable;
rank->ienable &= ~*r;
vgic_unlock_rank(v, rank);
- vgic_disable_irqs(v, (*r) & tr, gicd_reg - GICD_ICENABLER);
+ vgic_disable_irqs(v, (*r) & tr, (gicd_reg - GICD_ICENABLER) >> 2);
return 1;
case GICD_ISPENDR ... GICD_ISPENDRN:
@@ -670,9 +672,8 @@ void vgic_clear_pending_irqs(struct vcpu *v)
void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
{
- int idx = irq >> 2, byte = irq & 0x3;
uint8_t priority;
- struct vgic_irq_rank *rank = vgic_irq_rank(v, 8, idx);
+ struct vgic_irq_rank *rank = vgic_irq_rank(v, 8, irq);
struct pending_irq *iter, *n = irq_to_pending(v, irq);
unsigned long flags;
bool_t running;
@@ -693,7 +694,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq)
return;
}
- priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, byte);
+ priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, irq)], 0, irq & 0x3);
n->irq = irq;
set_bit(GIC_IRQ_GUEST_QUEUED, &n->status);
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index c7b7368..7fa3b95 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -18,37 +18,37 @@
#ifndef __ASM_ARM_GIC_H__
#define __ASM_ARM_GIC_H__
-#define GICD_CTLR (0x000/4)
-#define GICD_TYPER (0x004/4)
-#define GICD_IIDR (0x008/4)
-#define GICD_IGROUPR (0x080/4)
-#define GICD_IGROUPRN (0x0FC/4)
-#define GICD_ISENABLER (0x100/4)
-#define GICD_ISENABLERN (0x17C/4)
-#define GICD_ICENABLER (0x180/4)
-#define GICD_ICENABLERN (0x1fC/4)
-#define GICD_ISPENDR (0x200/4)
-#define GICD_ISPENDRN (0x27C/4)
-#define GICD_ICPENDR (0x280/4)
-#define GICD_ICPENDRN (0x2FC/4)
-#define GICD_ISACTIVER (0x300/4)
-#define GICD_ISACTIVERN (0x37C/4)
-#define GICD_ICACTIVER (0x380/4)
-#define GICD_ICACTIVERN (0x3FC/4)
-#define GICD_IPRIORITYR (0x400/4)
-#define GICD_IPRIORITYRN (0x7F8/4)
-#define GICD_ITARGETSR (0x800/4)
-#define GICD_ITARGETSRN (0xBF8/4)
-#define GICD_ICFGR (0xC00/4)
-#define GICD_ICFGRN (0xCFC/4)
-#define GICD_NSACR (0xE00/4)
-#define GICD_NSACRN (0xEFC/4)
-#define GICD_SGIR (0xF00/4)
-#define GICD_CPENDSGIR (0xF10/4)
-#define GICD_CPENDSGIRN (0xF1C/4)
-#define GICD_SPENDSGIR (0xF20/4)
-#define GICD_SPENDSGIRN (0xF2C/4)
-#define GICD_ICPIDR2 (0xFE8/4)
+#define GICD_CTLR (0x000)
+#define GICD_TYPER (0x004)
+#define GICD_IIDR (0x008)
+#define GICD_IGROUPR (0x080)
+#define GICD_IGROUPRN (0x0FC)
+#define GICD_ISENABLER (0x100)
+#define GICD_ISENABLERN (0x17C)
+#define GICD_ICENABLER (0x180)
+#define GICD_ICENABLERN (0x1fC)
+#define GICD_ISPENDR (0x200)
+#define GICD_ISPENDRN (0x27C)
+#define GICD_ICPENDR (0x280)
+#define GICD_ICPENDRN (0x2FC)
+#define GICD_ISACTIVER (0x300)
+#define GICD_ISACTIVERN (0x37C)
+#define GICD_ICACTIVER (0x380)
+#define GICD_ICACTIVERN (0x3FC)
+#define GICD_IPRIORITYR (0x400)
+#define GICD_IPRIORITYRN (0x7F8)
+#define GICD_ITARGETSR (0x800)
+#define GICD_ITARGETSRN (0xBF8)
+#define GICD_ICFGR (0xC00)
+#define GICD_ICFGRN (0xCFC)
+#define GICD_NSACR (0xE00)
+#define GICD_NSACRN (0xEFC)
+#define GICD_SGIR (0xF00)
+#define GICD_CPENDSGIR (0xF10)
+#define GICD_CPENDSGIRN (0xF1C)
+#define GICD_SPENDSGIR (0xF20)
+#define GICD_SPENDSGIRN (0xF2C)
+#define GICD_ICPIDR2 (0xFE8)
#define GICD_SGI_TARGET_LIST_SHIFT (24)
#define GICD_SGI_TARGET_LIST_MASK (0x3UL << GICD_SGI_TARGET_LIST_SHIFT)
@@ -60,27 +60,27 @@
#define GICD_SGI_GROUP1 (1UL<<15)
#define GICD_SGI_INTID_MASK (0xFUL)
-#define GICC_CTLR (0x0000/4)
-#define GICC_PMR (0x0004/4)
-#define GICC_BPR (0x0008/4)
-#define GICC_IAR (0x000C/4)
-#define GICC_EOIR (0x0010/4)
-#define GICC_RPR (0x0014/4)
-#define GICC_HPPIR (0x0018/4)
-#define GICC_APR (0x00D0/4)
-#define GICC_NSAPR (0x00E0/4)
-#define GICC_DIR (0x1000/4)
-
-#define GICH_HCR (0x00/4)
-#define GICH_VTR (0x04/4)
-#define GICH_VMCR (0x08/4)
-#define GICH_MISR (0x10/4)
-#define GICH_EISR0 (0x20/4)
-#define GICH_EISR1 (0x24/4)
-#define GICH_ELSR0 (0x30/4)
-#define GICH_ELSR1 (0x34/4)
-#define GICH_APR (0xF0/4)
-#define GICH_LR (0x100/4)
+#define GICC_CTLR (0x0000)
+#define GICC_PMR (0x0004)
+#define GICC_BPR (0x0008)
+#define GICC_IAR (0x000C)
+#define GICC_EOIR (0x0010)
+#define GICC_RPR (0x0014)
+#define GICC_HPPIR (0x0018)
+#define GICC_APR (0x00D0)
+#define GICC_NSAPR (0x00E0)
+#define GICC_DIR (0x1000)
+
+#define GICH_HCR (0x00)
+#define GICH_VTR (0x04)
+#define GICH_VMCR (0x08)
+#define GICH_MISR (0x10)
+#define GICH_EISR0 (0x20)
+#define GICH_EISR1 (0x24)
+#define GICH_ELSR0 (0x30)
+#define GICH_ELSR1 (0x34)
+#define GICH_APR (0xF0)
+#define GICH_LR (0x100)
/* Register bits */
#define GICD_CTL_ENABLE 0x1
--
1.7.9.5