
[Xen-devel] [PATCH 1/2] xen/arm: Introduce accessors for GICv2 MMIO reads/writes



A future patch needs to make the accesses to GICC more complex. For consistency,
introduce read/write wrappers for all three regions (GICD, GICC, GICH).
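
As a purely illustrative sketch (not part of this patch; the standalone stub
below stands in for the real iomapped region and register offsets), the point
of the wrappers is that every access to a region funnels through one helper,
so a later change to how that region is accessed only touches the helper,
never its callers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the iomapped GICC region. */
    static uint8_t fake_gicc[0x1000];

    /* Mirrors the writel_gicc()/readl_gicc() shape introduced below.
     * If GICC accesses later need extra work (the stated motivation),
     * only these two bodies change. */
    static inline void writel_gicc(uint32_t val, int offset)
    {
        memcpy(fake_gicc + offset, &val, sizeof(val));
    }

    static inline uint32_t readl_gicc(int offset)
    {
        uint32_t val;
        memcpy(&val, fake_gicc + offset, sizeof(val));
        return val;
    }

    int main(void)
    {
        writel_gicc(0xff, 0x4);  /* e.g. the priority mask register */
        printf("read back %#x\n", (unsigned int)readl_gicc(0x4));
        return 0;
    }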

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxxxxxxxxxx>
---
 xen/arch/arm/gic-v2.c |  157 +++++++++++++++++++++++++++++--------------------
 1 file changed, 94 insertions(+), 63 deletions(-)

diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index cc60af8..3820be5 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -60,16 +60,12 @@
 #define GICH_V2_VMCR_PRIORITY_MASK   0x1f
 #define GICH_V2_VMCR_PRIORITY_SHIFT  27
 
-#define GICD (gicv2.map_dbase)
-#define GICC (gicv2.map_cbase)
-#define GICH (gicv2.map_hbase)
-
 /* Global state */
 static struct {
     paddr_t dbase;            /* Address of distributor registers */
     void __iomem * map_dbase; /* IO mapped Address of distributor registers */
     paddr_t cbase;            /* Address of CPU interface registers */
-    void __iomem * map_cbase; /* IO mapped Address of CPU interface registers*/
+    void __iomem * map_cbase; /* IO mapped Address of CPU interface registers */
     paddr_t hbase;            /* Address of virtual interface registers */
     void __iomem * map_hbase; /* IO Address of virtual interface registers */
     paddr_t vbase;            /* Address of virtual cpu interface registers */
@@ -87,6 +83,41 @@ static DEFINE_PER_CPU(u8, gic_cpu_id);
 /* Maximum cpu interface per GIC */
 #define NR_GIC_CPU_IF 8
 
+static inline void writeb_gicd(uint32_t val, int offset)
+{
+    writeb_relaxed(val, gicv2.map_dbase + offset);
+}
+
+static inline void writel_gicd(uint32_t val, int offset)
+{
+    writel_relaxed(val, gicv2.map_dbase + offset);
+}
+
+static inline uint32_t readl_gicd(int offset)
+{
+    return readl_relaxed(gicv2.map_dbase + offset);
+}
+
+static inline void writel_gicc(uint32_t val, int offset)
+{
+    writel_relaxed(val, gicv2.map_cbase + offset);
+}
+
+static inline uint32_t readl_gicc(int offset)
+{
+    return readl_relaxed(gicv2.map_cbase + offset);
+}
+
+static inline void writel_gich(uint32_t val, int offset)
+{
+    writel_relaxed(val, gicv2.map_hbase + offset);
+}
+
+static inline uint32_t readl_gich(int offset)
+{
+    return readl_relaxed(gicv2.map_hbase + offset);
+}
+
 static unsigned int gicv2_cpu_mask(const cpumask_t *cpumask)
 {
     unsigned int cpu;
@@ -112,12 +143,12 @@ static void gicv2_save_state(struct vcpu *v)
      * accessed simultaneously by another pCPU.
      */
     for ( i = 0; i < gicv2_info.nr_lrs; i++ )
-        v->arch.gic.v2.lr[i] = readl_relaxed(GICH + GICH_LR + i * 4);
+        v->arch.gic.v2.lr[i] = readl_gich(GICH_LR + i * 4);
 
-    v->arch.gic.v2.apr = readl_relaxed(GICH + GICH_APR);
-    v->arch.gic.v2.vmcr = readl_relaxed(GICH + GICH_VMCR);
+    v->arch.gic.v2.apr = readl_gich(GICH_APR);
+    v->arch.gic.v2.vmcr = readl_gich(GICH_VMCR);
     /* Disable until next VCPU scheduled */
-    writel_relaxed(0, GICH + GICH_HCR);
+    writel_gich(0, GICH_HCR);
 }
 
 static void gicv2_restore_state(const struct vcpu *v)
@@ -125,11 +156,11 @@ static void gicv2_restore_state(const struct vcpu *v)
     int i;
 
     for ( i = 0; i < gicv2_info.nr_lrs; i++ )
-        writel_relaxed(v->arch.gic.v2.lr[i], GICH + GICH_LR + i * 4);
+        writel_gich(v->arch.gic.v2.lr[i], GICH_LR + i * 4);
 
-    writel_relaxed(v->arch.gic.v2.apr, GICH + GICH_APR);
-    writel_relaxed(v->arch.gic.v2.vmcr, GICH + GICH_VMCR);
-    writel_relaxed(GICH_HCR_EN, GICH + GICH_HCR);
+    writel_gich(v->arch.gic.v2.apr, GICH_APR);
+    writel_gich(v->arch.gic.v2.vmcr, GICH_VMCR);
+    writel_gich(GICH_HCR_EN, GICH_HCR);
 }
 
 static void gicv2_dump_state(const struct vcpu *v)
@@ -140,7 +171,7 @@ static void gicv2_dump_state(const struct vcpu *v)
     {
         for ( i = 0; i < gicv2_info.nr_lrs; i++ )
             printk("   HW_LR[%d]=%x\n", i,
-                   readl_relaxed(GICH + GICH_LR + i * 4));
+                   readl_gich(GICH_LR + i * 4));
     }
     else
     {
@@ -153,18 +184,18 @@ static void gicv2_eoi_irq(struct irq_desc *irqd)
 {
     int irq = irqd->irq;
     /* Lower the priority */
-    writel_relaxed(irq, GICC + GICC_EOIR);
+    writel_gicc(irq, GICC_EOIR);
 }
 
 static void gicv2_dir_irq(struct irq_desc *irqd)
 {
     /* Deactivate */
-    writel_relaxed(irqd->irq, GICC + GICC_DIR);
+    writel_gicc(irqd->irq, GICC_DIR);
 }
 
 static unsigned int gicv2_read_irq(void)
 {
-    return (readl_relaxed(GICC + GICC_IAR) & GICC_IA_IRQ);
+    return (readl_gicc(GICC_IAR) & GICC_IA_IRQ);
 }
 
 /*
@@ -185,18 +216,18 @@ static void gicv2_set_irq_properties(struct irq_desc *desc,
 
     spin_lock(&gicv2.lock);
     /* Set edge / level */
-    cfg = readl_relaxed(GICD + GICD_ICFGR + (irq / 16) * 4);
+    cfg = readl_gicd(GICD_ICFGR + (irq / 16) * 4);
     edgebit = 2u << (2 * (irq % 16));
     if ( type & DT_IRQ_TYPE_LEVEL_MASK )
         cfg &= ~edgebit;
     else if ( type & DT_IRQ_TYPE_EDGE_BOTH )
         cfg |= edgebit;
-    writel_relaxed(cfg, GICD + GICD_ICFGR + (irq / 16) * 4);
+    writel_gicd(cfg, GICD_ICFGR + (irq / 16) * 4);
 
     /* Set target CPU mask (RAZ/WI on uniprocessor) */
-    writeb_relaxed(mask, GICD + GICD_ITARGETSR + irq);
+    writeb_gicd(mask, GICD_ITARGETSR + irq);
     /* Set priority */
-    writeb_relaxed(priority, GICD + GICD_IPRIORITYR + irq);
+    writeb_gicd(priority, GICD_IPRIORITYR + irq);
 
     spin_unlock(&gicv2.lock);
 }
@@ -208,79 +239,79 @@ static void __init gicv2_dist_init(void)
     uint32_t gic_cpus;
     int i;
 
-    cpumask = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
+    cpumask = readl_gicd(GICD_ITARGETSR) & 0xff;
     cpumask |= cpumask << 8;
     cpumask |= cpumask << 16;
 
     /* Disable the distributor */
-    writel_relaxed(0, GICD + GICD_CTLR);
+    writel_gicd(0, GICD_CTLR);
 
-    type = readl_relaxed(GICD + GICD_TYPER);
+    type = readl_gicd(GICD_TYPER);
     gicv2_info.nr_lines = 32 * ((type & GICD_TYPE_LINES) + 1);
     gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
     printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n",
            gicv2_info.nr_lines, gic_cpus, (gic_cpus == 1) ? "" : "s",
            (type & GICD_TYPE_SEC) ? ", secure" : "",
-           readl_relaxed(GICD + GICD_IIDR));
+           readl_gicd(GICD_IIDR));
 
     /* Default all global IRQs to level, active low */
     for ( i = 32; i < gicv2_info.nr_lines; i += 16 )
-        writel_relaxed(0x0, GICD + GICD_ICFGR + (i / 16) * 4);
+        writel_gicd(0x0, GICD_ICFGR + (i / 16) * 4);
 
     /* Route all global IRQs to this CPU */
     for ( i = 32; i < gicv2_info.nr_lines; i += 4 )
-        writel_relaxed(cpumask, GICD + GICD_ITARGETSR + (i / 4) * 4);
+        writel_gicd(cpumask, GICD_ITARGETSR + (i / 4) * 4);
 
     /* Default priority for global interrupts */
     for ( i = 32; i < gicv2_info.nr_lines; i += 4 )
-        writel_relaxed (GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
-                        GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
-                        GICD + GICD_IPRIORITYR + (i / 4) * 4);
+        writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
+                    GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
+                    GICD_IPRIORITYR + (i / 4) * 4);
 
     /* Disable all global interrupts */
     for ( i = 32; i < gicv2_info.nr_lines; i += 32 )
-        writel_relaxed(~0x0, GICD + GICD_ICENABLER + (i / 32) * 4);
+        writel_gicd(~0x0, GICD_ICENABLER + (i / 32) * 4);
 
     /* Turn on the distributor */
-    writel_relaxed(GICD_CTL_ENABLE, GICD + GICD_CTLR);
+    writel_gicd(GICD_CTL_ENABLE, GICD_CTLR);
 }
 
 static void __cpuinit gicv2_cpu_init(void)
 {
     int i;
 
-    this_cpu(gic_cpu_id) = readl_relaxed(GICD + GICD_ITARGETSR) & 0xff;
+    this_cpu(gic_cpu_id) = readl_gicd(GICD_ITARGETSR) & 0xff;
 
     /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
      * even though they are controlled with GICD registers, they must
      * be set up here with the other per-cpu state. */
-    writel_relaxed(0xffff0000, GICD + GICD_ICENABLER); /* Disable all PPI */
-    writel_relaxed(0x0000ffff, GICD + GICD_ISENABLER); /* Enable all SGI */
+    writel_gicd(0xffff0000, GICD_ICENABLER); /* Disable all PPI */
+    writel_gicd(0x0000ffff, GICD_ISENABLER); /* Enable all SGI */
 
     /* Set SGI priorities */
     for ( i = 0; i < 16; i += 4 )
-        writel_relaxed(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 |
-                       GIC_PRI_IPI << 8 | GIC_PRI_IPI,
-                       GICD + GICD_IPRIORITYR + (i / 4) * 4);
+        writel_gicd(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 |
+                    GIC_PRI_IPI << 8 | GIC_PRI_IPI,
+                    GICD_IPRIORITYR + (i / 4) * 4);
 
     /* Set PPI priorities */
     for ( i = 16; i < 32; i += 4 )
-        writel_relaxed(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
-                      GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
-                      GICD + GICD_IPRIORITYR + (i / 4) * 4);
+        writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
+                    GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
+                    GICD_IPRIORITYR + (i / 4) * 4);
 
     /* Local settings: interface controller */
     /* Don't mask by priority */
-    writel_relaxed(0xff, GICC + GICC_PMR);
+    writel_gicc(0xff, GICC_PMR);
     /* Finest granularity of priority */
-    writel_relaxed(0x0, GICC + GICC_BPR);
+    writel_gicc(0x0, GICC_BPR);
     /* Turn on delivery */
-    writel_relaxed(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC + GICC_CTLR);
+    writel_gicc(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC_CTLR);
 }
 
 static void gicv2_cpu_disable(void)
 {
-    writel_relaxed(0x0, GICC + GICC_CTLR);
+    writel_gicc(0x0, GICC_CTLR);
 }
 
 static void __cpuinit gicv2_hyp_init(void)
@@ -288,16 +319,16 @@ static void __cpuinit gicv2_hyp_init(void)
     uint32_t vtr;
     uint8_t nr_lrs;
 
-    vtr = readl_relaxed(GICH + GICH_VTR);
+    vtr = readl_gich(GICH_VTR);
     nr_lrs  = (vtr & GICH_V2_VTR_NRLRGS) + 1;
     gicv2_info.nr_lrs = nr_lrs;
 
-    writel_relaxed(GICH_MISR_EOI, GICH + GICH_MISR);
+    writel_gich(GICH_MISR_EOI, GICH_MISR);
 }
 
 static void __cpuinit gicv2_hyp_disable(void)
 {
-    writel_relaxed(0, GICH + GICH_HCR);
+    writel_gich(0, GICH_HCR);
 }
 
 static int gicv2_secondary_cpu_init(void)
@@ -321,17 +352,17 @@ static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
     switch ( irqmode )
     {
     case SGI_TARGET_OTHERS:
-        writel_relaxed(GICD_SGI_TARGET_OTHERS | sgi, GICD + GICD_SGIR);
+        writel_gicd(GICD_SGI_TARGET_OTHERS | sgi, GICD_SGIR);
         break;
     case SGI_TARGET_SELF:
-        writel_relaxed(GICD_SGI_TARGET_SELF | sgi, GICD + GICD_SGIR);
+        writel_gicd(GICD_SGI_TARGET_SELF | sgi, GICD_SGIR);
         break;
     case SGI_TARGET_LIST:
         cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
         mask = gicv2_cpu_mask(&online_mask);
-        writel_relaxed(GICD_SGI_TARGET_LIST |
-                       (mask << GICD_SGI_TARGET_SHIFT) | sgi,
-                       GICD + GICD_SGIR);
+        writel_gicd(GICD_SGI_TARGET_LIST |
+                    (mask << GICD_SGI_TARGET_SHIFT) | sgi,
+                    GICD_SGIR);
         break;
     default:
         BUG();
@@ -364,12 +395,12 @@ static void gicv2_update_lr(int lr, const struct pending_irq *p,
         lr_reg |= GICH_V2_LR_HW | ((p->desc->irq & GICH_V2_LR_PHYSICAL_MASK )
                                   << GICH_V2_LR_PHYSICAL_SHIFT);
 
-    writel_relaxed(lr_reg, GICH + GICH_LR + lr * 4);
+    writel_gich(lr_reg, GICH_LR + lr * 4);
 }
 
 static void gicv2_clear_lr(int lr)
 {
-    writel_relaxed(0, GICH + GICH_LR + lr * 4);
+    writel_gich(0, GICH_LR + lr * 4);
 }
 
 static int gicv2v_setup(struct domain *d)
@@ -422,7 +453,7 @@ static void gicv2_read_lr(int lr, struct gic_lr *lr_reg)
 {
     uint32_t lrv;
 
-    lrv          = readl_relaxed(GICH + GICH_LR + lr * 4);
+    lrv          = readl_gich(GICH_LR + lr * 4);
     lr_reg->pirq = (lrv >> GICH_V2_LR_PHYSICAL_SHIFT) & GICH_V2_LR_PHYSICAL_MASK;
     lr_reg->virq = (lrv >> GICH_V2_LR_VIRTUAL_SHIFT) & GICH_V2_LR_VIRTUAL_MASK;
     lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK;
@@ -445,30 +476,30 @@ static void gicv2_write_lr(int lr, const struct gic_lr *lr_reg)
                                        << GICH_V2_LR_HW_SHIFT)  |
           ((uint32_t)(lr_reg->grp & GICH_V2_LR_GRP_MASK) << GICH_V2_LR_GRP_SHIFT) );
 
-    writel_relaxed(lrv, GICH + GICH_LR + lr * 4);
+    writel_gich(lrv, GICH_LR + lr * 4);
 }
 
 static void gicv2_hcr_status(uint32_t flag, bool_t status)
 {
-    uint32_t hcr = readl_relaxed(GICH + GICH_HCR);
+    uint32_t hcr = readl_gich(GICH_HCR);
 
     if ( status )
         hcr |= flag;
     else
         hcr &= (~flag);
 
-    writel_relaxed(hcr, GICH + GICH_HCR);
+    writel_gich(hcr, GICH_HCR);
 }
 
 static unsigned int gicv2_read_vmcr_priority(void)
 {
-   return ((readl_relaxed(GICH + GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT)
+   return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT)
            & GICH_V2_VMCR_PRIORITY_MASK);
 }
 
 static unsigned int gicv2_read_apr(int apr_reg)
 {
-   return readl_relaxed(GICH + GICH_APR);
+   return readl_gich(GICH_APR);
 }
 
 static void gicv2_irq_enable(struct irq_desc *desc)
@@ -482,7 +513,7 @@ static void gicv2_irq_enable(struct irq_desc *desc)
     desc->status &= ~IRQ_DISABLED;
     dsb(sy);
     /* Enable routing */
-    writel_relaxed((1u << (irq % 32)), GICD + GICD_ISENABLER + (irq / 32) * 4);
+    writel_gicd((1u << (irq % 32)), GICD_ISENABLER + (irq / 32) * 4);
     spin_unlock_irqrestore(&gicv2.lock, flags);
 }
 
@@ -495,7 +526,7 @@ static void gicv2_irq_disable(struct irq_desc *desc)
 
     spin_lock_irqsave(&gicv2.lock, flags);
     /* Disable routing */
-    writel_relaxed(1u << (irq % 32), GICD + GICD_ICENABLER + (irq / 32) * 4);
+    writel_gicd(1u << (irq % 32), GICD_ICENABLER + (irq / 32) * 4);
     desc->status |= IRQ_DISABLED;
     spin_unlock_irqrestore(&gicv2.lock, flags);
 }
-- 
1.7.10.4

