|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v3 1/3] arm/mpu: implement setup_virt_paging for MPU system
From: Penny Zheng <Penny.Zheng@xxxxxxx>
Implement setup_virt_paging for aarch64 MPU systems, taking care of
stage 2 address translation regime, IPA bits, supported VMID length
configuration and vtcr_el2/vstcr_el2 register programming.
Implement also the Armv8-R specific changes to ID_AA64MMFR0_EL1,
related to the supported memory system architecture (PMSA/VMSA)
and check that when MPU is built, the underlying HW is compatible
with PMSA. By default MPU at EL2 and EL1 is required.
Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
Signed-off-by: Wei Chen <wei.chen@xxxxxxx>
Signed-off-by: Luca Fancellu <luca.fancellu@xxxxxxx>
Signed-off-by: Hari Limaye <hari.limaye@xxxxxxx>
Signed-off-by: Harry Ramsey <harry.ramsey@xxxxxxx>
---
v3:
- Refactor unused code to more relevant commits.
- Add P2M print information
- Formatting issues
- Update commit message
v2:
- Separate commit into multiple commits
---
xen/arch/arm/arm64/mpu/p2m.c | 80 +++++++++++++++++++++++-
xen/arch/arm/include/asm/arm64/sysregs.h | 4 ++
xen/arch/arm/include/asm/cpufeature.h | 13 +++-
xen/arch/arm/include/asm/processor.h | 8 +++
4 files changed, 101 insertions(+), 4 deletions(-)
diff --git a/xen/arch/arm/arm64/mpu/p2m.c b/xen/arch/arm/arm64/mpu/p2m.c
index b6d8b2777b58..fda512dc7c8f 100644
--- a/xen/arch/arm/arm64/mpu/p2m.c
+++ b/xen/arch/arm/arm64/mpu/p2m.c
@@ -2,11 +2,89 @@
#include <xen/bug.h>
#include <xen/init.h>
+#include <xen/lib.h>
#include <asm/p2m.h>
void __init setup_virt_paging(void)
{
- BUG_ON("unimplemented");
+ register_t vtcr_el2 = READ_SYSREG(VTCR_EL2);
+ register_t vstcr_el2 = READ_SYSREG(VSTCR_EL2);
+
+ /* PA size */
+ const unsigned int pa_range_info[] = {32, 36, 40, 42, 44, 48, 52, 0 /* Invalid */};
+
+ /*
+ * Restrict "p2m_ipa_bits" if needed. As P2M table is always configured
+ * with IPA bits == PA bits, compare against "pabits".
+ */
+ if ( pa_range_info[system_cpuinfo.mm64.pa_range] < p2m_ipa_bits )
+ p2m_ipa_bits = pa_range_info[system_cpuinfo.mm64.pa_range];
+
+ /*
+ * The MSA and MSA_frac fields in the ID_AA64MMFR0_EL1 register identify the
+ * memory system configurations supported. In Armv8-R AArch64, the
+ * only permitted value for ID_AA64MMFR0_EL1.MSA is 0b1111.
+ */
+ if ( system_cpuinfo.mm64.msa != MM64_MSA_PMSA_SUPPORT )
+ goto fault;
+
+ /* Permitted values for ID_AA64MMFR0_EL1.MSA_frac are 0b0001 and 0b0010. */
+ if ( (system_cpuinfo.mm64.msa_frac != MM64_MSA_FRAC_PMSA_SUPPORT) &&
+ (system_cpuinfo.mm64.msa_frac != MM64_MSA_FRAC_VMSA_SUPPORT) )
+ goto fault;
+
+ /* Stage 1 EL1&0 translation regime uses PMSAv8 by default */
+ vtcr_el2 &= ~VTCR_MSA;
+
+ /*
+ * Clear VTCR_EL2.NSA bit to configure non-secure stage 2 translation output
+ * address space to access the Secure PA space as Armv8-R only implements
+ * secure state.
+ */
+ vtcr_el2 &= ~VTCR_NSA;
+
+ /*
+ * cpuinfo sanitization makes sure we support 16bits VMID only if all cores
+ * are supporting it.
+ *
+ * Set the VS bit only if 16 bit VMID is supported.
+ */
+ if ( system_cpuinfo.mm64.vmid_bits == MM64_VMID_16_BITS_SUPPORT )
+ {
+ vtcr_el2 |= VTCR_VS;
+ max_vmid = MAX_VMID_16_BIT;
+ }
+ else
+ vtcr_el2 &= ~VTCR_VS;
+
+ p2m_vmid_allocator_init();
+
+ WRITE_SYSREG(vtcr_el2, VTCR_EL2);
+
+ /*
+ * VSTCR_EL2.SA defines secure stage 2 translation output address space.
+ * To make sure that all stage 2 translations for the Secure PA space access
+ * the Secure PA space, we keep SA bit as 0.
+ *
+ * VSTCR_EL2.SC is NS check enable bit. To make sure that Stage 2 NS
+ * configuration is checked against stage 1 NS configuration in EL1&0
+ * translation regime for the given address, and generates a fault if they
+ * are different, we set the SC bit to 1.
+ */
+ vstcr_el2 &= ~VSTCR_EL2_SA;
+ vstcr_el2 |= VSTCR_EL2_SC;
+ WRITE_SYSREG(vstcr_el2, VSTCR_EL2);
+
+ printk("P2M: %d-bit IPA with %d-bit PA and %d-bit VMID\n",
+ p2m_ipa_bits,
+ pa_range_info[system_cpuinfo.mm64.pa_range],
+ ( MAX_VMID == MAX_VMID_16_BIT ) ? 16 : 8);
+
+ return;
+
+ fault:
+ panic("Hardware with no PMSAv8-64 support in any translation regime\n");
}
/*
diff --git a/xen/arch/arm/include/asm/arm64/sysregs.h b/xen/arch/arm/include/asm/arm64/sysregs.h
index 19d409d3eb5a..f3c11d871e86 100644
--- a/xen/arch/arm/include/asm/arm64/sysregs.h
+++ b/xen/arch/arm/include/asm/arm64/sysregs.h
@@ -462,6 +462,10 @@
#define ZCR_ELx_LEN_SIZE 9
#define ZCR_ELx_LEN_MASK 0x1ff
+/* Virtualization Secure Translation Control Register */
+#define VSTCR_EL2_SA (_AC(0x1,U) << 30)
+#define VSTCR_EL2_SC (_AC(0x1,U) << 20)
+
#ifdef CONFIG_MPU
/*
* The Armv8-R AArch64 architecture always executes code in Secure
diff --git a/xen/arch/arm/include/asm/cpufeature.h b/xen/arch/arm/include/asm/cpufeature.h
index 13353c8e1ad1..bf902a397068 100644
--- a/xen/arch/arm/include/asm/cpufeature.h
+++ b/xen/arch/arm/include/asm/cpufeature.h
@@ -248,6 +248,12 @@ struct cpuinfo_arm {
unsigned long tgranule_16K:4;
unsigned long tgranule_64K:4;
unsigned long tgranule_4K:4;
+#ifdef CONFIG_MPU
+ unsigned long __res0:16;
+ unsigned long msa:4;
+ unsigned long msa_frac:4;
+ unsigned long __res1:8;
+#else
unsigned long tgranule_16k_2:4;
unsigned long tgranule_64k_2:4;
unsigned long tgranule_4k_2:4;
@@ -255,6 +261,7 @@ struct cpuinfo_arm {
unsigned long __res0:8;
unsigned long fgt:4;
unsigned long ecv:4;
+#endif
/* MMFR1 */
unsigned long hafdbs:4;
@@ -267,13 +274,13 @@ struct cpuinfo_arm {
unsigned long xnx:4;
unsigned long twed:4;
unsigned long ets:4;
- unsigned long __res1:4;
+ unsigned long __res2:4;
unsigned long afp:4;
- unsigned long __res2:12;
+ unsigned long __res3:12;
unsigned long ecbhb:4;
/* MMFR2 */
- unsigned long __res3:64;
+ unsigned long __res4:64;
};
} mm64;
diff --git a/xen/arch/arm/include/asm/processor.h b/xen/arch/arm/include/asm/processor.h
index 1a48c9ff3b39..895d7cd50244 100644
--- a/xen/arch/arm/include/asm/processor.h
+++ b/xen/arch/arm/include/asm/processor.h
@@ -403,6 +403,9 @@
#define VTCR_RES1 (_AC(1,UL)<<31)
+#define VTCR_MSA (_AC(0x1,UL)<<31)
+#define VTCR_NSA (_AC(0x1,UL)<<30)
+
/* HCPTR Hyp. Coprocessor Trap Register */
#define HCPTR_TAM ((_AC(1,U)<<30))
#define HCPTR_TTA ((_AC(1,U)<<20)) /* Trap trace registers */
@@ -464,6 +467,11 @@
#define MM64_VMID_16_BITS_SUPPORT 0x2
#endif
+#define MM64_MSA_PMSA_SUPPORT 0xf
+#define MM64_MSA_FRAC_NONE_SUPPORT 0x0
+#define MM64_MSA_FRAC_PMSA_SUPPORT 0x1
+#define MM64_MSA_FRAC_VMSA_SUPPORT 0x2
+
#ifndef __ASSEMBLER__
extern register_t __cpu_logical_map[];
--
2.34.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |