
[Xen-devel] [PATCH v2 07/17] arm64: vgic-v3: Add ICV_EOIR1_EL1 handler



This patch is ported to Xen from Linux commit
b6f49035b4bf6e2709f2a5fed3107f5438c1fd02
("KVM: arm64: vgic-v3: Add ICV_EOIR1_EL1 handler").

Add a handler for writing the guest's view of the ICC_EOIR1_EL1
register. This involves dropping the priority of the interrupt,
and deactivating it if required (EOImode == 0).
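
To make review easier, here is a rough, standalone sketch of the decision
flow the new handler implements for a trapped ICC_EOIR1_EL1 write. It is
not part of the patch: the helper name and parameters are illustrative
only, and the relevant ICH_* state is passed in as plain values.

/*
 * Standalone sketch (not part of the patch): the decision flow of
 * vgic_v3_write_eoir() for a trapped guest write to ICC_EOIR1_EL1,
 * with the relevant ICH_* state passed in as plain values.
 */
#include <stdbool.h>
#include <stdio.h>

#define VGIC_MIN_LPI        8192
#define ICH_VMCR_EOIM_MASK  (1u << 9)    /* ICH_VMCR_EL2.VEOIM */

static void sketch_eoir1_write(unsigned int vid, unsigned int vmcr,
                               bool have_active_lr,
                               unsigned int lr_pre_prio,
                               unsigned int act_prio)
{
    /* 1. The priority drop happens unconditionally (clear highest APR bit). */
    printf("priority drop: 0x%02x\n", act_prio);

    /* 2. LPIs have no active state, so there is nothing to deactivate. */
    if ( vid >= VGIC_MIN_LPI )
        return;

    /* 3. With EOImode == 1 the guest deactivates separately via ICC_DIR_EL1. */
    if ( vmcr & ICH_VMCR_EOIM_MASK )
        return;

    /* 4. No matching active LR: only ICH_HCR_EL2.EOIcount is bumped. */
    if ( !have_active_lr )
    {
        printf("vINTID %u not in an active LR, bump EOIcount\n", vid);
        return;
    }

    /* 5. A priority mismatch (the patch also checks the group bit) is a
     *    bogus EOI and is ignored.
     */
    if ( lr_pre_prio != act_prio )
        return;

    printf("deactivate vINTID %u in its LR\n", vid);
}

int main(void)
{
    sketch_eoir1_write(27, 0, true, 0xa0, 0xa0);  /* normal Group1 EOI */
    sketch_eoir1_write(8192, 0, false, 0, 0xe0);  /* LPI: drop only    */
    return 0;
}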

Signed-off-by: Manish Jaggi <manish.jaggi@xxxxxxxxxx>
---
 xen/arch/arm/arm64/vgic-v3-sr.c     | 136 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/sysregs.h |   1 +
 xen/include/asm-arm/gic_v3_defs.h   |   4 ++
 3 files changed, 141 insertions(+)

diff --git a/xen/arch/arm/arm64/vgic-v3-sr.c b/xen/arch/arm/arm64/vgic-v3-sr.c
index 026d64506f..e32ec01f56 100644
--- a/xen/arch/arm/arm64/vgic-v3-sr.c
+++ b/xen/arch/arm/arm64/vgic-v3-sr.c
@@ -33,6 +33,7 @@
 
 #define ICC_IAR1_EL1_SPURIOUS    0x3ff
 #define VGIC_MAX_SPI             1019
+#define VGIC_MIN_LPI             8192
 
 static int vgic_v3_bpr_min(void)
 {
@@ -482,6 +483,137 @@ static void vreg_emulate_iar(struct cpu_user_regs *regs, const union hsr hsr)
     vgic_v3_read_iar(regs, hsr);
 }
 
+static int vgic_v3_find_active_lr(int intid, uint64_t *lr_val)
+{
+    int i;
+    unsigned int used_lrs = gic_get_num_lrs();
+
+    for ( i = 0; i < used_lrs; i++ )
+    {
+        uint64_t val = gicv3_ich_read_lr(i);
+
+        if ( (val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
+            (val & ICH_LR_ACTIVE_BIT) )
+        {
+            *lr_val = val;
+            return i;
+        }
+    }
+
+    *lr_val = ICC_IAR1_EL1_SPURIOUS;
+    return -1;
+}
+
+static int vgic_v3_clear_highest_active_priority(void)
+{
+    int i;
+    uint32_t hap = 0;
+    uint8_t nr_apr_regs = vtr_to_nr_apr_regs(READ_SYSREG32(ICH_VTR_EL2));
+
+    for ( i = 0; i < nr_apr_regs; i++ )
+    {
+        uint32_t ap0, ap1;
+        int c0, c1;
+
+        ap0 = vgic_v3_read_ap0rn(i);
+        ap1 = vgic_v3_read_ap1rn(i);
+        if ( !ap0 && !ap1 )
+        {
+            hap += 32;
+            continue;
+        }
+
+        c0 = ap0 ? __ffs(ap0) : 32;
+        c1 = ap1 ? __ffs(ap1) : 32;
+
+        /* Always clear the LSB, which is the highest priority */
+        if ( c0 < c1 )
+        {
+            ap0 &= ~BIT(c0);
+            vgic_v3_write_ap0rn(ap0, i);
+            hap += c0;
+        }
+        else
+        {
+            ap1 &= ~BIT(c1);
+            vgic_v3_write_ap1rn(ap1, i);
+            hap += c1;
+        }
+
+        /* Rescale to 8 bits of priority */
+        return hap << vgic_v3_bpr_min();
+    }
+
+    return GICV3_IDLE_PRIORITY;
+}
+
+static void vgic_v3_clear_active_lr(int lr, uint64_t lr_val)
+{
+    lr_val &= ~ICH_LR_ACTIVE_BIT;
+    if ( lr_val & ICH_LR_HW )
+    {
+        uint32_t pid;
+
+        pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
+        WRITE_SYSREG32(pid, ICC_DIR_EL1);
+    }
+    gicv3_ich_write_lr(lr, lr_val);
+}
+
+static void vgic_v3_bump_eoicount(void)
+{
+    uint32_t hcr;
+
+    hcr = READ_SYSREG32(ICH_HCR_EL2);
+    hcr += 1 << ICH_HCR_EOIcount_SHIFT;
+    WRITE_SYSREG32(hcr, ICH_HCR_EL2);
+}
+
+static void vgic_v3_write_eoir(struct cpu_user_regs *regs,
+                               const union hsr hsr)
+{
+    uint32_t vmcr = READ_SYSREG32(ICH_VMCR_EL2);
+    register_t vid = get_user_reg(regs, hsr.sysreg.reg);
+    uint64_t lr_val;
+    uint8_t lr_prio, act_prio;
+    int lr, grp;
+
+    grp = vgic_v3_get_group(hsr);
+
+    /* Drop priority in any case */
+    act_prio = vgic_v3_clear_highest_active_priority();
+
+    /* If EOIing an LPI, no deactivate to be performed */
+    if ( vid >= VGIC_MIN_LPI )
+        return;
+
+    /* EOImode == 1, nothing to be done here */
+    if ( vmcr & ICH_VMCR_EOIM_MASK )
+        return;
+
+    lr = vgic_v3_find_active_lr(vid, &lr_val);
+    if ( lr == -1 )
+    {
+        vgic_v3_bump_eoicount();
+        return;
+    }
+
+    lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+
+    /* If priorities or group do not match, the guest has fscked-up. */
+    if ( grp != !!(lr_val & ICH_LR_GROUP) ||
+         vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio )
+        return;
+
+    /* Let's now perform the deactivation */
+    vgic_v3_clear_active_lr(lr, lr_val);
+}
+
+static void vreg_emulate_eoi(struct cpu_user_regs *regs, const union hsr hsr)
+{
+    vgic_v3_write_eoir(regs, hsr);
+}
+
 /*
  * returns true if the register is emulated.
  */
@@ -512,6 +644,10 @@ bool vgic_v3_handle_cpuif_access(struct cpu_user_regs *regs)
         vreg_emulate_iar(regs, hsr);
         break;
 
+    case HSR_SYSREG_ICC_EOIR1_EL1:
+        vreg_emulate_eoi(regs, hsr);
+        break;
+
     default:
         ret = false;
         break;
diff --git a/xen/include/asm-arm/arm64/sysregs.h b/xen/include/asm-arm/arm64/sysregs.h
index 53d2251840..f9110ebf9c 100644
--- a/xen/include/asm-arm/arm64/sysregs.h
+++ b/xen/include/asm-arm/arm64/sysregs.h
@@ -92,6 +92,7 @@
 #define HSR_SYSREG_ICC_BPR1_EL1   HSR_SYSREG(3,0,c12,c12,3)
 #define HSR_SYSREG_ICC_IGRPEN1_EL1 HSR_SYSREG(3,0,c12,c12,7)
 #define HSR_SYSREG_ICC_IAR1_EL1   HSR_SYSREG(3,0,c12,c12,0)
+#define HSR_SYSREG_ICC_EOIR1_EL1  HSR_SYSREG(3,0,c12,c12,1)
 #define HSR_SYSREG_CONTEXTIDR_EL1 HSR_SYSREG(3,0,c13,c0,1)
 
 #define HSR_SYSREG_PMCR_EL0       HSR_SYSREG(3,3,c9,c12,0)
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index 884fce0fd0..b169e2cb78 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -170,6 +170,10 @@
 #define ICH_VMCR_ENG0_MASK           (1 << ICH_VMCR_ENG0_SHIFT)
 #define ICH_VMCR_PMR_SHIFT           24
 #define ICH_VMCR_PMR_MASK            (0xffUL << ICH_VMCR_PMR_SHIFT)
+#define ICH_VMCR_EOIM_SHIFT          9
+#define ICH_VMCR_EOIM_MASK           (1 << ICH_VMCR_EOIM_SHIFT)
+#define ICH_HCR_EOIcount_SHIFT       27
+#define ICH_HCR_EOIcount_MASK        (0x1f << ICH_HCR_EOIcount_SHIFT)
 
 #define GICH_LR_VIRTUAL_MASK         0xffff
 #define GICH_LR_VIRTUAL_SHIFT        0
-- 
2.14.1

