
[Xen-devel] [PATCH 08/10] Add ICV_EOIR1_EL1 handler



From: Manish Jaggi <manish.jaggi@xxxxxxxxxx>

Add a handler for writes to the guest's view of the ICC_EOIR1_EL1
register. A write always drops the priority of the interrupt and, if
the interrupt is not an LPI and EOImode is clear, also deactivates it.

Signed-off-by: Manish Jaggi <manish.jaggi@xxxxxxxxxx>
---
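For reviewers, a condensed, self-contained model of the EOI path this patch
adds. This is not part of the patch itself; the helpers below are made-up
stand-ins for the real __vgic_v3_* functions in vsysreg.c, and the values
are hard-coded for illustration only:

/*
 * Standalone sketch of a guest write to ICC_EOIR1_EL1:
 *   1. always drop the running priority,
 *   2. skip deactivation for LPIs and for EOImode == 1,
 *   3. otherwise deactivate the matching LR or bump EOIcount.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VGIC_MIN_LPI  8192

/* Stand-ins for the real priority-drop and LR helpers. */
static uint8_t drop_highest_active_priority(void) { return 0x80; }
static int find_active_lr(uint32_t vid) { return (vid == 27) ? 0 : -1; }
static void deactivate_lr(int lr) { printf("deactivate LR%d\n", lr); }
static void bump_eoicount(void) { printf("EOIcount++\n"); }

static void model_write_eoir(uint32_t vid, bool eoimode)
{
    /* A write to EOIR1 always drops the running priority. */
    (void)drop_highest_active_priority();

    /* LPIs have no active state, so there is nothing to deactivate. */
    if ( vid >= VGIC_MIN_LPI )
        return;

    /* With EOImode == 1, deactivation comes later via ICC_DIR_EL1. */
    if ( eoimode )
        return;

    /* Deactivate the matching LR, or count a spurious EOI. */
    int lr = find_active_lr(vid);
    if ( lr < 0 )
        bump_eoicount();
    else
        deactivate_lr(lr);
}

int main(void)
{
    model_write_eoir(27, false);    /* SPI with an active LR */
    model_write_eoir(8192, false);  /* LPI: priority drop only */
    return 0;
}

The real handler additionally checks that the group and priority recorded in
the LR match what the guest is EOIing before deactivating; see
__vgic_v3_write_eoir() in the hunk below.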
 xen/arch/arm/arm64/vsysreg.c        | 136 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/sysregs.h |   1 +
 xen/include/asm-arm/gic_v3_defs.h   |   4 ++
 3 files changed, 141 insertions(+)

diff --git a/xen/arch/arm/arm64/vsysreg.c b/xen/arch/arm/arm64/vsysreg.c
index 44c74d4217..393d167e56 100644
--- a/xen/arch/arm/arm64/vsysreg.c
+++ b/xen/arch/arm/arm64/vsysreg.c
@@ -230,6 +230,7 @@ void do_sysreg(struct cpu_user_regs *regs,
 
 #define ICC_IAR1_EL1_SPURIOUS    0x3ff
 #define VGIC_MAX_SPI             1019
+#define VGIC_MIN_LPI             8192
 
 static int  __vgic_v3_bpr_min(void)
 {
@@ -594,6 +595,137 @@ void handle_iar(struct cpu_user_regs *regs, int regidx, const union hsr hsr)
     __vgic_v3_read_iar(regs, regidx, hsr);
 }
 
+static int __vgic_v3_find_active_lr(int intid, u64 *lr_val)
+{
+    int i;
+    unsigned int used_lrs = gic_get_num_lrs();
+
+    for ( i = 0; i < used_lrs; i++ )
+    {
+        u64 val = __gicv3_ich_read_lr(i);
+
+        if ( (val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
+             (val & ICH_LR_ACTIVE_BIT) )
+        {
+            *lr_val = val;
+            return i;
+        }
+    }
+
+    *lr_val = ICC_IAR1_EL1_SPURIOUS;
+    return -1;
+}
+
+static int __vgic_v3_clear_highest_active_priority(void)
+{
+    u32 hap = 0;
+    int i;
+    u8 nr_apr_regs = vtr_to_nr_apr_regs(READ_SYSREG32(ICH_VTR_EL2));
+
+    for ( i = 0; i < nr_apr_regs; i++ )
+    {
+        u32 ap0, ap1;
+        int c0, c1;
+
+        ap0 = __vgic_v3_read_ap0rn(i);
+        ap1 = __vgic_v3_read_ap1rn(i);
+        if ( !ap0 && !ap1 )
+        {
+            hap += 32;
+            continue;
+        }
+
+        c0 = ap0 ? __ffs(ap0) : 32;
+        c1 = ap1 ? __ffs(ap1) : 32;
+
+        /* Always clear the LSB, which is the highest priority */
+        if ( c0 < c1 )
+        {
+            ap0 &= ~BIT(c0);
+            __vgic_v3_write_ap0rn(ap0, i);
+            hap += c0;
+        }
+        else
+        {
+            ap1 &= ~BIT(c1);
+            __vgic_v3_write_ap1rn(ap1, i);
+            hap += c1;
+        }
+
+        /* Rescale to 8 bits of priority */
+        return hap << __vgic_v3_bpr_min();
+    }
+
+    return GICV3_IDLE_PRIORITY;
+}
+
+static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
+{
+    lr_val &= ~ICH_LR_ACTIVE_BIT;
+    if ( lr_val & ICH_LR_HW )
+    {
+        u32 pid;
+
+        pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
+        WRITE_SYSREG32(pid, ICC_DIR_EL1);
+    }
+    __gicv3_ich_write_lr(lr, lr_val);
+}
+
+static void __vgic_v3_bump_eoicount(void)
+{
+    u32 hcr;
+
+    hcr = READ_SYSREG32(ICH_HCR_EL2);
+    hcr += 1 << ICH_HCR_EOIcount_SHIFT;
+    WRITE_SYSREG32(hcr, ICH_HCR_EL2);
+}
+
+static void __vgic_v3_write_eoir(struct cpu_user_regs *regs, int regidx,
+                                 const union hsr hsr)
+{
+    u32 vmcr = READ_SYSREG32(ICH_VMCR_EL2);
+    u32 vid = get_user_reg(regs, regidx);
+    u64 lr_val;
+    u8 lr_prio, act_prio;
+    int lr, grp;
+
+    grp = __vgic_v3_get_group(hsr);
+
+    /* Drop priority in any case */
+    act_prio = __vgic_v3_clear_highest_active_priority();
+
+    /* If EOIing an LPI, no deactivate to be performed */
+    if ( vid >= VGIC_MIN_LPI )
+        return;
+
+    /* EOImode == 1, nothing to be done here */
+    if ( vmcr & ICH_VMCR_EOIM_MASK )
+        return;
+
+    lr = __vgic_v3_find_active_lr(vid, &lr_val);
+    if ( lr == -1 )
+    {
+        __vgic_v3_bump_eoicount();
+        return;
+    }
+
+    lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+
+    /* If priorities or group do not match, the guest has fscked-up. */
+    if ( grp != !!(lr_val & ICH_LR_GROUP) ||
+         __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio )
+        return;
+
+    /* Let's now perform the deactivation */
+    __vgic_v3_clear_active_lr(lr, lr_val);
+}
+
+void handle_eoi(struct cpu_user_regs *regs, int regidx, const union hsr hsr)
+{
+    __vgic_v3_write_eoir(regs, regidx, hsr);
+}
+
 int do_fixup_vgic_errata(struct cpu_user_regs *regs, const union hsr hsr)
 {
     int ret = 0;
@@ -614,6 +746,10 @@ int do_fixup_vgic_errata(struct cpu_user_regs *regs, const union hsr hsr)
         handle_iar(regs, regidx, hsr);
         break;
 
+    case HSR_SYSREG_ICC_EOIR1_EL1:
+        handle_eoi(regs, regidx, hsr);
+        break;
+
     default:
         ret = -1;
         break;
diff --git a/xen/include/asm-arm/arm64/sysregs.h b/xen/include/asm-arm/arm64/sysregs.h
index 53d2251840..f9110ebf9c 100644
--- a/xen/include/asm-arm/arm64/sysregs.h
+++ b/xen/include/asm-arm/arm64/sysregs.h
@@ -92,6 +92,7 @@
 #define HSR_SYSREG_ICC_BPR1_EL1   HSR_SYSREG(3,0,c12,c12,3)
 #define HSR_SYSREG_ICC_IGRPEN1_EL1 HSR_SYSREG(3,0,c12,c12,7)
 #define HSR_SYSREG_ICC_IAR1_EL1   HSR_SYSREG(3,0,c12,c12,0)
+#define HSR_SYSREG_ICC_EOIR1_EL1  HSR_SYSREG(3,0,c12,c12,1)
 #define HSR_SYSREG_CONTEXTIDR_EL1 HSR_SYSREG(3,0,c13,c0,1)
 
 #define HSR_SYSREG_PMCR_EL0       HSR_SYSREG(3,3,c9,c12,0)
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index f22549a228..1bbcfd1c50 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -171,6 +171,10 @@
 #define ICH_VMCR_ENG0_MASK           (1 << ICH_VMCR_ENG0_SHIFT)
 #define ICH_VMCR_PMR_SHIFT           24
 #define ICH_VMCR_PMR_MASK            (0xffUL << ICH_VMCR_PMR_SHIFT)
+#define ICH_VMCR_EOIM_SHIFT          9
+#define ICH_VMCR_EOIM_MASK           (1 << ICH_VMCR_EOIM_SHIFT)
+#define ICH_HCR_EOIcount_SHIFT       27
+#define ICH_HCR_EOIcount_MASK        (0x1f << ICH_HCR_EOIcount_SHIFT)
 
 #define GICH_LR_VIRTUAL_MASK         0xffff
 #define GICH_LR_VIRTUAL_SHIFT        0
-- 
2.14.1

