
[Xen-devel] [PATCH 10/12] arm64: Add ICV_IAR1_EL1 handler



From: Manish Jaggi <manish.jaggi@xxxxxxxxxx>

This patch is ported to Xen from Linux commit
132a324ab62fe4fb8d6dcc2ab4eddb0e93b69afe.

Add a handler for reading the guest's view of the ICC_IAR1_EL1
register. This involves finding the highest-priority pending
Group-1 interrupt, checking it against both the PMR and the
active group priority, activating the interrupt, and setting
its group priority as active.

Signed-off-by: Manish Jaggi <manish.jaggi@xxxxxxxxxx>
---
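Note to reviewers (not part of the commit message): the
preemption-level masking in __vgic_v3_pri_to_pre() is easier to
follow with concrete numbers. Below is a minimal standalone sketch
of the same arithmetic for the Group-1 case; the helper name and
the values are illustrative only, not code from this patch.

    #include <stdio.h>
    #include <stdint.h>

    /* Zero the sub-priority bits selected by the BPR, as
     * __vgic_v3_pri_to_pre() does for Group 1. */
    static uint8_t pri_to_pre(uint8_t pri, unsigned int bpr)
    {
        return pri & (uint8_t)(0xff << bpr);
    }

    int main(void)
    {
        /* With BPR1 == 3, bits [2:0] are sub-priority, so a
         * priority of 0x94 preempts at level 0x90. */
        printf("0x%02x\n", pri_to_pre(0x94, 3));
        return 0;
    }
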
 xen/arch/arm/arm64/vsysreg_errata.c | 202 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/sysregs.h |   1 +
 xen/include/asm-arm/gic_v3_defs.h   |  17 ++++
 3 files changed, 220 insertions(+)

diff --git a/xen/arch/arm/arm64/vsysreg_errata.c b/xen/arch/arm/arm64/vsysreg_errata.c
index d7bf9d6ce3..9bc1d7b58a 100644
--- a/xen/arch/arm/arm64/vsysreg_errata.c
+++ b/xen/arch/arm/arm64/vsysreg_errata.c
@@ -3,8 +3,18 @@
 #include <asm/traps.h>
 #include <asm/system.h>
 #include <asm/gic_v3_defs.h>
+#include <xen/sched.h>
+#include <asm/vtimer.h>
 
 #define vtr_to_nr_pre_bits(v)     ((((u32)(v) >> 26) & 7) + 1)
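+/* 2^pre_bits preemption levels, 32 levels per APR: 1 << (pre_bits - 5) regs. */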
+#define vtr_to_nr_apr_regs(v)     (1 << (vtr_to_nr_pre_bits(v) - 5))
+
+#define ESR_ELx_SYS64_ISS_CRM_SHIFT 1
+#define ESR_ELx_SYS64_ISS_CRM_MASK (0xf << ESR_ELx_SYS64_ISS_CRM_SHIFT)
+
+#define ICC_IAR1_EL1_SPURIOUS    0x3ff
+#define VGIC_MAX_SPI             1019
 
 static int  __vgic_v3_bpr_min(void)
 {
@@ -272,6 +282,194 @@ static void gicv3_ich_write_lr(int lr, uint64_t val)
     isb();
 }
 
+static int __vgic_v3_get_group(const union hsr hsr)
+{
+    u8 crm = (hsr.bits & ESR_ELx_SYS64_ISS_CRM_MASK) >>
+              ESR_ELx_SYS64_ISS_CRM_SHIFT;
+
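+    /* CRm == c8 is ICC_IAR0_EL1 (Group 0); c12 is ICC_IAR1_EL1 (Group 1). */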
+    return crm != 8;
+}
+
+unsigned int gic_get_num_lrs(void)
+{
+    uint32_t vtr;
+
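+    /* ICH_VTR_EL2.ListRegs holds the number of implemented LRs minus one. */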
+    vtr = READ_SYSREG32(ICH_VTR_EL2);
+    return (vtr & GICH_VTR_NRLRGS) + 1;
+}
+
+static int __vgic_v3_highest_priority_lr(struct cpu_user_regs *regs,
+                                         u32 vmcr, u64 *lr_val)
+{
+    int i, lr = -1;
+    unsigned int used_lrs = gic_get_num_lrs();
+    u8 priority = GICV3_IDLE_PRIORITY;
+
+    for ( i = 0; i < used_lrs; i++ )
+    {
+        u64 val = gicv3_ich_read_lr(i);
+        u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+
+        /* Not pending in the state? */
+        if ( (val & ICH_LR_STATE) != ICH_LR_PENDING_BIT )
+            continue;
+
+        /* Group-0 interrupt, but Group-0 disabled? */
+        if ( !(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK) )
+            continue;
+
+        /* Group-1 interrupt, but Group-1 disabled? */
+        if ( (val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK) )
+            continue;
+
+        /* Not the highest priority? */
+        if ( lr_prio >= priority )
+            continue;
+
+        /* This is a candidate */
+        priority = lr_prio;
+        *lr_val = val;
+        lr = i;
+    }
+
+    if ( lr == -1 )
+        *lr_val = ICC_IAR1_EL1_SPURIOUS;
+
+    return lr;
+}
+
+static int __vgic_v3_get_highest_active_priority(void)
+{
+    int i;
+    u32 hap = 0;
+    u8 nr_apr_regs = vtr_to_nr_apr_regs(READ_SYSREG32(ICH_VTR_EL2));
+
+    for ( i = 0; i < nr_apr_regs; i++ )
+    {
+        u32 val;
+
+        /*
+         * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
+         * contain the active priority levels for this VCPU
+         * for the maximum number of supported priority
+         * levels, and we return the full priority level only
+         * if the BPR is programmed to its minimum, otherwise
+         * we return a combination of the priority level and
+         * subpriority, as determined by the setting of the
+         * BPR, but without the full subpriority.
+         */
+        val  = __vgic_v3_read_ap0rn(i);
+        val |= __vgic_v3_read_ap1rn(i);
+        if ( !val )
+        {
+            hap += 32;
+            continue;
+        }
+
+        return (hap + __ffs(val)) << __vgic_v3_bpr_min();
+    }
+
+    return GICV3_IDLE_PRIORITY;
+}
+
+/*
+ * Convert a priority to a preemption level, taking the relevant BPR
+ * into account by zeroing the sub-priority bits.
+ */
+static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
+{
+    unsigned int bpr;
+
+    if ( !grp )
+        bpr = __vgic_v3_get_bpr0(vmcr) + 1;
+    else
+        bpr = __vgic_v3_get_bpr1(vmcr);
+
+    return pri & (GENMASK(7, 0) << bpr);
+}
+
+/*
+ * The priority value is independent of any of the BPR values, so we
+ * normalize it using the minimal BPR value. This guarantees that no
+ * matter what the guest does with its BPR, we can always set/get the
+ * same value of a priority.
+ */
+static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
+{
+    u8 pre, ap;
+    u32 val;
+    int apr;
+
+    pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
+    ap = pre >> __vgic_v3_bpr_min();
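+    /* Each ICH_AP<0,1>Rn_EL2 register tracks 32 active priority levels. */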
+    apr = ap / 32;
+
+    if ( !grp )
+    {
+        val = __vgic_v3_read_ap0rn(apr);
+        __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
+    }
+    else
+    {
+        val = __vgic_v3_read_ap1rn(apr);
+        __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
+    }
+}
+
+static void __vgic_v3_read_iar(struct cpu_user_regs *regs, int regidx,
+                               const union hsr hsr)
+{
+    u64 lr_val;
+    u8 lr_prio, pmr;
+    int lr, grp;
+
+    u32 vmcr = READ_SYSREG32(ICH_VMCR_EL2);
+    grp = __vgic_v3_get_group(hsr);
+
+    lr = __vgic_v3_highest_priority_lr(regs, vmcr, &lr_val);
+    if ( lr < 0 )
+        goto spurious;
+
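+    /* The pending interrupt must match the group this accessor acks. */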
+    if ( grp != !!(lr_val & ICH_LR_GROUP) )
+        goto spurious;
+
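+    /* Only interrupts at higher priority (lower value) than the PMR fire. */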
+    pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+    lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+    if ( pmr <= lr_prio )
+        goto spurious;
+
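+    /* Deliver only if it would preempt every active group priority. */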
+    if ( __vgic_v3_get_highest_active_priority() <=
+         __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) )
+        goto spurious;
+
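+    /* Acknowledge: drop the pending state from the LR. */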
+    lr_val &= ~ICH_LR_STATE;
+    /* No active state for LPIs */
+    if ( (lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI )
+        lr_val |= ICH_LR_ACTIVE_BIT;
+
+    gicv3_ich_write_lr(lr, lr_val);
+    __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
+    set_user_reg(regs, regidx, lr_val & ICH_LR_VIRTUAL_ID_MASK);
+
+    return;
+
+spurious:
+    set_user_reg(regs, regidx, ICC_IAR1_EL1_SPURIOUS);
+}
+
+void handle_iar(struct cpu_user_regs *regs, int regidx, const union hsr hsr)
+{
+    __vgic_v3_read_iar(regs, regidx, hsr);
+}
+
+
 bool vgic_v3_handle_cpuif_access(struct cpu_user_regs *regs, const union hsr hsr)
 {
     bool ret = 0;
@@ -294,6 +492,10 @@ bool vgic_v3_handle_cpuif_access(struct cpu_user_regs *regs, const union hsr hsr
         handle_igrpen1(regs, regidx, hsr.sysreg.read, hsr);
         break;
 
+    case HSR_SYSREG_ICC_IAR1_EL1:
+        handle_iar(regs, regidx, hsr);
+        break;
+
     default:
         ret = 1;
         break;
diff --git a/xen/include/asm-arm/arm64/sysregs.h b/xen/include/asm-arm/arm64/sysregs.h
index 731cabc74a..53d2251840 100644
--- a/xen/include/asm-arm/arm64/sysregs.h
+++ b/xen/include/asm-arm/arm64/sysregs.h
@@ -91,6 +91,7 @@
 #define HSR_SYSREG_ICC_SRE_EL1    HSR_SYSREG(3,0,c12,c12,5)
 #define HSR_SYSREG_ICC_BPR1_EL1   HSR_SYSREG(3,0,c12,c12,3)
 #define HSR_SYSREG_ICC_IGRPEN1_EL1 HSR_SYSREG(3,0,c12,c12,7)
+#define HSR_SYSREG_ICC_IAR1_EL1   HSR_SYSREG(3,0,c12,c12,0)
 #define HSR_SYSREG_CONTEXTIDR_EL1 HSR_SYSREG(3,0,c13,c0,1)
 
 #define HSR_SYSREG_PMCR_EL0       HSR_SYSREG(3,3,c9,c12,0)
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index ff8bda37d1..884fce0fd0 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -67,6 +67,7 @@
  */
 #define GICV3_GICD_IIDR_VAL          0x34c
 #define GICV3_GICR_IIDR_VAL          GICV3_GICD_IIDR_VAL
+#define GICV3_IDLE_PRIORITY          0xff
 
 #define GICR_CTLR                    (0x0000)
 #define GICR_IIDR                    (0x0004)
@@ -165,6 +166,10 @@
 #define ICH_VMCR_BPR1_MASK           (7 << ICH_VMCR_BPR1_SHIFT)
 #define ICH_VMCR_ENG1_SHIFT          1
 #define ICH_VMCR_ENG1_MASK           (1 << ICH_VMCR_ENG1_SHIFT)
+#define ICH_VMCR_ENG0_SHIFT          0
+#define ICH_VMCR_ENG0_MASK           (1 << ICH_VMCR_ENG0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT           24
+#define ICH_VMCR_PMR_MASK            (0xffUL << ICH_VMCR_PMR_SHIFT)
 
 #define GICH_LR_VIRTUAL_MASK         0xffff
 #define GICH_LR_VIRTUAL_SHIFT        0
@@ -182,6 +187,18 @@
 #define GICH_LR_GRP1                 (1UL<<60)
 #define GICH_LR_HW                   (1UL<<61)
 
+#define ICH_LR_PRIORITY_SHIFT        48
+#define ICH_LR_PRIORITY_MASK         (0xffULL << ICH_LR_PRIORITY_SHIFT)
+#define ICH_LR_EOI                   (1ULL << 41)
+#define ICH_LR_GROUP                 (1ULL << 60)
+#define ICH_LR_HW                    (1ULL << 61)
+#define ICH_LR_STATE                 (3ULL << 62)
+#define ICH_LR_PENDING_BIT           (1ULL << 62)
+#define ICH_LR_ACTIVE_BIT            (1ULL << 63)
+#define ICH_LR_PHYS_ID_SHIFT         32
+#define ICH_LR_PHYS_ID_MASK          (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_VIRTUAL_ID_MASK       ((1ULL << 32) - 1)
+
 #define GICH_VTR_NRLRGS              0x3f
 #define GICH_VTR_PRIBITS_MASK        0x7
 #define GICH_VTR_PRIBITS_SHIFT       29
-- 
2.14.1

