[Xen-devel] [PATCH v2 06/17] arm64: Add ICV_IAR1_EL1 handler
This patch is ported to Xen from Linux commit
132a324ab62fe4fb8d6dcc2ab4eddb0e93b69afe
("KVM: arm64: vgic-v3: Add ICV_IAR1_EL1 handler").

Add a handler for reading the guest's view of the ICC_IAR1_EL1
register. This involves finding the highest-priority pending Group-1
interrupt, checking it against both the PMR and the active group
priority, activating the interrupt, and setting the group priority
as active.

Signed-off-by: Manish Jaggi <manish.jaggi@xxxxxxxxxx>
---
 xen/arch/arm/arm64/vgic-v3-sr.c     | 194 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/sysregs.h |   1 +
 xen/include/asm-arm/gic_v3_defs.h   |  17 ++++
 3 files changed, 212 insertions(+)

diff --git a/xen/arch/arm/arm64/vgic-v3-sr.c b/xen/arch/arm/arm64/vgic-v3-sr.c
index fc5246539e..026d64506f 100644
--- a/xen/arch/arm/arm64/vgic-v3-sr.c
+++ b/xen/arch/arm/arm64/vgic-v3-sr.c
@@ -22,8 +22,17 @@
 #include <asm/regs.h>
 #include <asm/system.h>
 #include <asm/traps.h>
+#include <asm/vtimer.h>
+#include <xen/sched.h>
 
 #define vtr_to_nr_pre_bits(v) ((((uint32_t)(v) >> 26) & 7) + 1)
+#define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5))
+
+#define ESR_ELx_SYS64_ISS_CRM_SHIFT 1
+#define ESR_ELx_SYS64_ISS_CRM_MASK  (0xf << ESR_ELx_SYS64_ISS_CRM_SHIFT)
+
+#define ICC_IAR1_EL1_SPURIOUS 0x3ff
+#define VGIC_MAX_SPI          1019
 
 static int vgic_v3_bpr_min(void)
 {
@@ -292,6 +301,187 @@ static void gicv3_ich_write_lr(int lr, uint64_t val)
     isb();
 }
 
+static int vgic_v3_get_group(const union hsr hsr)
+{
+    uint8_t crm = (hsr.bits & ESR_ELx_SYS64_ISS_CRM_MASK) >>
+                  ESR_ELx_SYS64_ISS_CRM_SHIFT;
+
+    return crm != 8;
+}
+
+static unsigned int gic_get_num_lrs(void)
+{
+    uint32_t vtr;
+
+    vtr = READ_SYSREG32(ICH_VTR_EL2);
+    return (vtr & GICH_VTR_NRLRGS) + 1;
+}
+
+static int vgic_v3_highest_priority_lr(struct cpu_user_regs *regs,
+                                       uint32_t vmcr, uint64_t *lr_val)
+{
+    unsigned int i, lr = -1;
+    unsigned int used_lrs = gic_get_num_lrs();
+    uint8_t priority = GICV3_IDLE_PRIORITY;
+
+    for ( i = 0; i < used_lrs; i++ )
+    {
+        uint64_t val = gicv3_ich_read_lr(i);
+        uint8_t lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+
+        /* Not pending in the state? */
+        if ( (val & ICH_LR_STATE) != ICH_LR_PENDING_BIT )
+            continue;
+
+        /* Group-0 interrupt, but Group-0 disabled? */
+        if ( !(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK) )
+            continue;
+
+        /* Group-1 interrupt, but Group-1 disabled? */
+        if ( (val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK) )
+            continue;
+
+        /* Not the highest priority? */
+        if ( lr_prio >= priority )
+            continue;
+
+        /* This is a candidate */
+        priority = lr_prio;
+        *lr_val = val;
+        lr = i;
+    }
+
+    if ( lr == -1 )
+        *lr_val = ICC_IAR1_EL1_SPURIOUS;
+
+    return lr;
+}
+
+static int vgic_v3_get_highest_active_priority(void)
+{
+    unsigned int i;
+    uint32_t hap = 0;
+    uint8_t nr_apr_regs = vtr_to_nr_apr_regs(READ_SYSREG32(ICH_VTR_EL2));
+
+    for ( i = 0; i < nr_apr_regs; i++ )
+    {
+        uint32_t val;
+
+        /*
+         * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
+         * contain the active priority levels for this VCPU
+         * for the maximum number of supported priority
+         * levels, and we return the full priority level only
+         * if the BPR is programmed to its minimum, otherwise
+         * we return a combination of the priority level and
+         * subpriority, as determined by the setting of the
+         * BPR, but without the full subpriority.
+         */
+        val = vgic_v3_read_ap0rn(i);
+        val |= vgic_v3_read_ap1rn(i);
+        if ( !val )
+        {
+            hap += 32;
+            continue;
+        }
+
+        return (hap + __ffs(val)) << vgic_v3_bpr_min();
+    }
+
+    return GICV3_IDLE_PRIORITY;
+}
+
+/*
+ * Convert a priority to a preemption level, taking the relevant BPR
+ * into account by zeroing the sub-priority bits.
+ */
+static uint8_t vgic_v3_pri_to_pre(uint8_t pri, uint32_t vmcr, int grp)
+{
+    unsigned int bpr;
+
+    if ( !grp )
+        bpr = vgic_v3_get_bpr0(vmcr) + 1;
+    else
+        bpr = vgic_v3_get_bpr1(vmcr);
+
+    return pri & (GENMASK(7, 0) << bpr);
+}
+
+/*
+ * The priority value is independent of any of the BPR values, so we
+ * normalize it using the minimal BPR value. This guarantees that no
+ * matter what the guest does with its BPR, we can always set/get the
+ * same value of a priority.
+ */
+static void vgic_v3_set_active_priority(uint8_t pri, uint32_t vmcr, int grp)
+{
+    uint8_t pre, ap;
+    uint32_t val;
+    int apr;
+
+    pre = vgic_v3_pri_to_pre(pri, vmcr, grp);
+    ap = pre >> vgic_v3_bpr_min();
+    apr = ap / 32;
+
+    if ( !grp )
+    {
+        val = vgic_v3_read_ap0rn(apr);
+        vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
+    }
+    else
+    {
+        val = vgic_v3_read_ap1rn(apr);
+        vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
+    }
+}
+
+static void vgic_v3_read_iar(struct cpu_user_regs *regs,
+                             const union hsr hsr)
+{
+    uint64_t lr_val;
+    uint8_t lr_prio, pmr;
+    int lr, grp;
+    int regidx = hsr.sysreg.reg;
+
+    uint32_t vmcr = READ_SYSREG32(ICH_VMCR_EL2);
+    grp = vgic_v3_get_group(hsr);
+
+    lr = vgic_v3_highest_priority_lr(regs, vmcr, &lr_val);
+    if ( lr < 0 )
+        goto spurious;
+
+    if ( grp != !!(lr_val & ICH_LR_GROUP) )
+        goto spurious;
+
+    pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+    lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+    if ( pmr <= lr_prio )
+        goto spurious;
+
+    if ( vgic_v3_get_highest_active_priority() <=
+         vgic_v3_pri_to_pre(lr_prio, vmcr, grp) )
+        goto spurious;
+
+    lr_val &= ~ICH_LR_STATE;
+    /* No active state for LPIs */
+    if ( (lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI )
+        lr_val |= ICH_LR_ACTIVE_BIT;
+
+    gicv3_ich_write_lr(lr, lr_val);
+    vgic_v3_set_active_priority(lr_prio, vmcr, grp);
+    set_user_reg(regs, regidx, lr_val & ICH_LR_VIRTUAL_ID_MASK);
+
+    return;
+
+spurious:
+    set_user_reg(regs, regidx, ICC_IAR1_EL1_SPURIOUS);
+}
+
+static void vreg_emulate_iar(struct cpu_user_regs *regs, const union hsr hsr)
+{
+    vgic_v3_read_iar(regs, hsr);
+}
+
 /*
  * returns true if the register is emulated.
  */
@@ -318,6 +508,10 @@ bool vgic_v3_handle_cpuif_access(struct cpu_user_regs *regs)
         vreg_emulate_igrpen1(regs, hsr);
         break;
 
+    case HSR_SYSREG_ICC_IAR1_EL1:
+        vreg_emulate_iar(regs, hsr);
+        break;
+
     default:
         ret = false;
         break;
diff --git a/xen/include/asm-arm/arm64/sysregs.h b/xen/include/asm-arm/arm64/sysregs.h
index 731cabc74a..53d2251840 100644
--- a/xen/include/asm-arm/arm64/sysregs.h
+++ b/xen/include/asm-arm/arm64/sysregs.h
@@ -91,6 +91,7 @@
 #define HSR_SYSREG_ICC_SRE_EL1     HSR_SYSREG(3,0,c12,c12,5)
 #define HSR_SYSREG_ICC_BPR1_EL1    HSR_SYSREG(3,0,c12,c12,3)
 #define HSR_SYSREG_ICC_IGRPEN1_EL1 HSR_SYSREG(3,0,c12,c12,7)
+#define HSR_SYSREG_ICC_IAR1_EL1    HSR_SYSREG(3,0,c12,c12,0)
 #define HSR_SYSREG_CONTEXTIDR_EL1  HSR_SYSREG(3,0,c13,c0,1)
 
 #define HSR_SYSREG_PMCR_EL0        HSR_SYSREG(3,3,c9,c12,0)
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index ff8bda37d1..884fce0fd0 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -67,6 +67,7 @@
  */
 #define GICV3_GICD_IIDR_VAL          0x34c
 #define GICV3_GICR_IIDR_VAL          GICV3_GICD_IIDR_VAL
+#define GICV3_IDLE_PRIORITY          0xff
 
 #define GICR_CTLR                    (0x0000)
 #define GICR_IIDR                    (0x0004)
@@ -165,6 +166,10 @@
 #define ICH_VMCR_BPR1_MASK           (7 << ICH_VMCR_BPR1_SHIFT)
 #define ICH_VMCR_ENG1_SHIFT          1
 #define ICH_VMCR_ENG1_MASK           (1 << ICH_VMCR_ENG1_SHIFT)
+#define ICH_VMCR_ENG0_SHIFT          0
+#define ICH_VMCR_ENG0_MASK           (1 << ICH_VMCR_ENG0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT           24
+#define ICH_VMCR_PMR_MASK            (0xffUL << ICH_VMCR_PMR_SHIFT)
 
 #define GICH_LR_VIRTUAL_MASK         0xffff
 #define GICH_LR_VIRTUAL_SHIFT        0
@@ -182,6 +187,18 @@
 #define GICH_LR_GRP1                 (1UL<<60)
 #define GICH_LR_HW                   (1UL<<61)
 
+#define ICH_LR_PRIORITY_SHIFT        48
+#define ICH_LR_PRIORITY_MASK         (0xffULL << ICH_LR_PRIORITY_SHIFT)
+#define ICH_LR_EOI                   (1ULL << 41)
+#define ICH_LR_GROUP                 (1ULL << 60)
+#define ICH_LR_HW                    (1ULL << 61)
+#define ICH_LR_STATE                 (3ULL << 62)
+#define ICH_LR_PENDING_BIT           (1ULL << 62)
+#define ICH_LR_ACTIVE_BIT            (1ULL << 63)
+#define ICH_LR_PHYS_ID_SHIFT         32
+#define ICH_LR_PHYS_ID_MASK          (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_VIRTUAL_ID_MASK       ((1ULL << 32) - 1)
+
 #define GICH_VTR_NRLRGS              0x3f
 #define GICH_VTR_PRIBITS_MASK        0x7
 #define GICH_VTR_PRIBITS_SHIFT       29
-- 
2.14.1
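
A side note on the active-priority bookkeeping above: the following is a
minimal standalone sketch (an illustration only, not part of the patch) of
the arithmetic performed by vgic_v3_pri_to_pre() and
vgic_v3_set_active_priority(). It assumes 5 implemented preemption bits, so
the minimal BPR is 8 - 5 = 3; GENMASK is a local stand-in for Xen's macro,
and the priority value 0x98 is an arbitrary example.

/*
 * Standalone sketch of the AP-register arithmetic; assumes 5 preemption
 * bits (NR_PRE_BITS) and an arbitrary example priority of 0x98.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)  (((~0U) >> (31 - (h))) & ((~0U) << (l)))

#define NR_PRE_BITS    5                        /* ICH_VTR_EL2 PRIbits + 1 */
#define BPR_MIN        (8 - NR_PRE_BITS)        /* minimal BPR: 3 */
#define NR_APR_REGS    (1 << (NR_PRE_BITS - 5)) /* vtr_to_nr_apr_regs(): 1 */

/* Group-1 flavour of vgic_v3_pri_to_pre(): zero the sub-priority bits. */
static uint8_t pri_to_pre(uint8_t pri, unsigned int bpr1)
{
    return pri & (GENMASK(7, 0) << bpr1);
}

int main(void)
{
    uint8_t pri = 0x98;                     /* priority taken from the LR */
    uint8_t pre = pri_to_pre(pri, BPR_MIN); /* 0x98 & 0xf8 = 0x98 */

    /* Normalize as vgic_v3_set_active_priority() does. */
    unsigned int ap  = pre >> BPR_MIN;      /* preemption level: 19 */
    unsigned int apr = ap / 32;             /* which ICH_AP1R<n>: 0 */
    unsigned int bit = ap % 32;             /* which bit within it: 19 */

    printf("%d APR reg(s); pre=%#x -> set bit %u of ICH_AP1R%u\n",
           NR_APR_REGS, (unsigned)pre, bit, apr);
    return 0;
}

Normalizing with the minimal BPR before indexing the AP registers is what
lets the patch record the same active level no matter how the guest has
programmed its BPR at the time of the acknowledge.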
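
Along the same lines, here is a small standalone sketch (again illustrative,
not part of the patch) of the LR state handling in vgic_v3_read_iar(): it
decodes a raw ICH_LR<n>_EL2 value using the bit definitions the patch adds
to gic_v3_defs.h, then applies the pending-to-active acknowledge step. The
LR value itself is made up for the example.

/*
 * Decode an example ICH_LR<n>_EL2 value and acknowledge it the way
 * vgic_v3_read_iar() does. Bit definitions copied from the patch.
 */
#include <stdio.h>
#include <stdint.h>

#define ICH_LR_PRIORITY_SHIFT  48
#define ICH_LR_PRIORITY_MASK   (0xffULL << ICH_LR_PRIORITY_SHIFT)
#define ICH_LR_GROUP           (1ULL << 60)
#define ICH_LR_STATE           (3ULL << 62)
#define ICH_LR_PENDING_BIT     (1ULL << 62)
#define ICH_LR_ACTIVE_BIT      (1ULL << 63)
#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
#define VGIC_MAX_SPI           1019

int main(void)
{
    /* Made-up LR: pending Group-1 interrupt 27 (an SPI) at priority 0x98. */
    uint64_t lr = ICH_LR_PENDING_BIT | ICH_LR_GROUP |
                  (0x98ULL << ICH_LR_PRIORITY_SHIFT) | 27;

    printf("intid=%llu prio=%#llx group1=%d pending=%d\n",
           (unsigned long long)(lr & ICH_LR_VIRTUAL_ID_MASK),
           (unsigned long long)
               ((lr & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT),
           !!(lr & ICH_LR_GROUP),
           (lr & ICH_LR_STATE) == ICH_LR_PENDING_BIT);

    /*
     * Acknowledge: clear the state field, then mark the interrupt active.
     * LPIs (INTID > 1019) have no active state and keep the field clear.
     */
    lr &= ~ICH_LR_STATE;
    if ( (lr & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI )
        lr |= ICH_LR_ACTIVE_BIT;

    printf("after ack: active=%d pending=%d\n",
           !!(lr & ICH_LR_ACTIVE_BIT),
           (lr & ICH_LR_STATE) == ICH_LR_PENDING_BIT);
    return 0;
}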