
[Minios-devel] [UNIKRAFT PATCHv5 32/46] plat/kvm: Enable MMU for Arm64


  • To: <minios-devel@xxxxxxxxxxxxxxxxxxxx>, <simon.kuenzer@xxxxxxxxx>
  • From: Wei Chen <wei.chen@xxxxxxx>
  • Date: Fri, 10 Aug 2018 07:08:22 +0000
  • Cc: Kaly.Xin@xxxxxxx, nd@xxxxxxx, wei.chen@xxxxxxx
  • Delivery-date: Fri, 10 Aug 2018 07:10:39 +0000
  • List-id: Mini-os development list <minios-devel.lists.xenproject.org>

From: Wei Chen <Wei.Chen@xxxxxxx>

QEMU/KVM provides a 1TB physical address space for Arm64. Accordingly,
we use 40-bit virtual addresses to map the physical address space 1:1.
This patch enables the MMU so that memory is accessed through virtual
addresses.
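
As a quick sanity check (not part of the patch; everything here is
illustrative only), the address-width arithmetic can be verified with
a few lines of C, where VIRT_BITS mirrors the macro added to
cpu_defs.h below:

    #include <assert.h>
    #include <stdint.h>

    #define VIRT_BITS 40    /* mirrors cpu_defs.h below */

    int main(void)
    {
            /* 2^40 bytes == 1TB, the physical space QEMU/KVM provides */
            assert((UINT64_C(1) << VIRT_BITS) ==
                   UINT64_C(1024) * 1024 * 1024 * 1024);
            /* TCR_EL1.T0SZ/T1SZ encode (64 - VA bits): 64 - 40 == 24 */
            assert(64 - VIRT_BITS == 24);
            return 0;
    }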

Signed-off-by: Wei Chen <Wei.Chen@xxxxxxx>
---
 plat/common/include/arm/arm64/cpu_defs.h | 134 +++++++++++++++++++++++
 plat/kvm/arm/entry64.S                   |  25 ++++-
 plat/kvm/arm/pagetable.S                 |  56 ++++++++++
 3 files changed, 214 insertions(+), 1 deletion(-)

diff --git a/plat/common/include/arm/arm64/cpu_defs.h b/plat/common/include/arm/arm64/cpu_defs.h
index b8f207f..9fefcca 100644
--- a/plat/common/include/arm/arm64/cpu_defs.h
+++ b/plat/common/include/arm/arm64/cpu_defs.h
@@ -34,6 +34,13 @@
 #ifndef __CPU_ARM_64_DEFS_H__
 #define __CPU_ARM_64_DEFS_H__
 
+/*
+ * The number of supported virtual address bits.
+ * We use a 1:1 VA to PA mapping, so VA and PA share the same address
+ * size: 1TB for both the virtual and the physical address space.
+ */
+#define VIRT_BITS 40
+
 /*
  * CLIDR_EL1, Cache Level ID Register
  * Identifies the type of cache, or caches, that are implemented at each
@@ -57,6 +64,24 @@
 #define DCCISW                 0x1
 #define DCCSW                  0x2
 
+/* Registers and Bits definitions for MMU */
+/* MAIR_EL1 - Memory Attribute Indirection Register */
+#define MAIR_ATTR_MASK(idx)    (_AC(0xff, UL) << ((idx) * 8))
+#define MAIR_ATTR(attr, idx)   ((attr) << ((idx) * 8))
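+/*
+ * Each 8-bit attribute occupies one byte of MAIR_EL1, selected by its
+ * index: index n covers bits [8n+7:8n], e.g. index 5 is bits [47:40].
+ */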
+
+/* Device-nGnRnE memory */
+#define MAIR_DEVICE_nGnRnE     0x00
+/* Device-nGnRE memory */
+#define MAIR_DEVICE_nGnRE      0x04
+/* Device-GRE memory */
+#define MAIR_DEVICE_GRE                0x0C
+/* Outer Non-cacheable + Inner Non-cacheable */
+#define MAIR_NORMAL_NC         0x44
+/* Outer + Inner Write-through non-transient */
+#define MAIR_NORMAL_WT         0xbb
+/* Outer + Inner Write-back non-transient */
+#define MAIR_NORMAL_WB         0xff
+
 /*
  * Memory types, these values are the indexs of the attributes
  * that defined in MAIR_EL1.
@@ -68,6 +93,115 @@
 #define NORMAL_WT      4
 #define NORMAL_WB      5
 
+#define MAIR_INIT_ATTR \
+               (MAIR_ATTR(MAIR_DEVICE_nGnRnE, DEVICE_nGnRnE) | \
+               MAIR_ATTR(MAIR_DEVICE_nGnRE, DEVICE_nGnRE) |   \
+               MAIR_ATTR(MAIR_DEVICE_GRE, DEVICE_GRE) |       \
+               MAIR_ATTR(MAIR_NORMAL_NC, NORMAL_NC) |         \
+               MAIR_ATTR(MAIR_NORMAL_WT, NORMAL_WT) |         \
+               MAIR_ATTR(MAIR_NORMAL_WB, NORMAL_WB))
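+/*
+ * One attribute byte per index; assuming the attribute indexes 0-5
+ * defined above, this packs MAIR_EL1 = 0x0000ffbb440c0400.
+ */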
+
+/* TCR_EL1 - Translation Control Register */
+#define TCR_ASID_16    (_AC(1, UL) << 36)
+
+#define TCR_IPS_SHIFT  32
+#define TCR_IPS_32BIT  (_AC(0, UL) << TCR_IPS_SHIFT)
+#define TCR_IPS_36BIT  (_AC(1, UL) << TCR_IPS_SHIFT)
+#define TCR_IPS_40BIT  (_AC(2, UL) << TCR_IPS_SHIFT)
+#define TCR_IPS_42BIT  (_AC(3, UL) << TCR_IPS_SHIFT)
+#define TCR_IPS_44BIT  (_AC(4, UL) << TCR_IPS_SHIFT)
+#define TCR_IPS_48BIT  (_AC(5, UL) << TCR_IPS_SHIFT)
+
+#define TCR_TG1_SHIFT  30
+#define TCR_TG1_16K    (1 << TCR_TG1_SHIFT)
+#define TCR_TG1_4K     (2 << TCR_TG1_SHIFT)
+#define TCR_TG1_64K    (3 << TCR_TG1_SHIFT)
+
+#define TCR_TG0_SHIFT  14
+#define TCR_TG0_4K     (0 << TCR_TG0_SHIFT)
+#define TCR_TG0_64K    (1 << TCR_TG0_SHIFT)
+#define TCR_TG0_16K    (2 << TCR_TG0_SHIFT)
+
+#define TCR_SH1_SHIFT  28
+#define TCR_SH1_IS     (0x3 << TCR_SH1_SHIFT)
+#define TCR_ORGN1_SHIFT        26
+#define TCR_ORGN1_WBWA (0x1 << TCR_ORGN1_SHIFT)
+#define TCR_IRGN1_SHIFT        24
+#define TCR_IRGN1_WBWA (0x1 << TCR_IRGN1_SHIFT)
+#define TCR_SH0_SHIFT  12
+#define TCR_SH0_IS     (0x3 << TCR_SH0_SHIFT)
+#define TCR_ORGN0_SHIFT        10
+#define TCR_ORGN0_WBWA (0x1 << TCR_ORGN0_SHIFT)
+#define TCR_IRGN0_SHIFT        8
+#define TCR_IRGN0_WBWA (0x1 << TCR_IRGN0_SHIFT)
+
+#define TCR_CACHE_ATTRS ((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) | \
+                       (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))
+
+#define TCR_SMP_ATTRS  (TCR_SH0_IS | TCR_SH1_IS)
+
+#define TCR_T1SZ_SHIFT 16
+#define TCR_T0SZ_SHIFT 0
+#define TCR_T1SZ(x)    ((x) << TCR_T1SZ_SHIFT)
+#define TCR_T0SZ(x)    ((x) << TCR_T0SZ_SHIFT)
+#define TCR_TxSZ(x)    (TCR_T1SZ(x) | TCR_T0SZ(x))
+
+#define TCR_INIT_FLAGS (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | \
+                       TCR_TG0_4K | TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
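+/*
+ * With VIRT_BITS == 40, TCR_TxSZ(64 - VIRT_BITS) programs
+ * T0SZ = T1SZ = 24, i.e. a 2^40 = 1TB virtual range per TTBR.
+ */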
+
+/* SCTLR_EL1 - System Control Register */
+#define SCTLR_M                (_AC(1, UL) << 0)       /* MMU enable */
+#define SCTLR_A                (_AC(1, UL) << 1)       /* Alignment check enable */
+#define SCTLR_C                (_AC(1, UL) << 2)       /* Data/unified cache enable */
+#define SCTLR_SA       (_AC(1, UL) << 3)       /* Stack alignment check enable */
+#define SCTLR_SA0      (_AC(1, UL) << 4)       /* Stack Alignment Check Enable for EL0 */
+#define SCTLR_CP15BEN  (_AC(1, UL) << 5)       /* System instruction memory barrier enable */
+#define SCTLR_ITD      (_AC(1, UL) << 7)       /* IT disable */
+#define SCTLR_SED      (_AC(1, UL) << 8)       /* SETEND instruction disable */
+#define SCTLR_UMA      (_AC(1, UL) << 9)       /* User mask access */
+#define SCTLR_I                (_AC(1, UL) << 12)      /* Instruction access Cacheability control */
+#define SCTLR_DZE      (_AC(1, UL) << 14)      /* Traps EL0 DC ZVA instructions to EL1 */
+#define SCTLR_UCT      (_AC(1, UL) << 15)      /* Traps EL0 accesses to CTR_EL0 to EL1 */
+#define SCTLR_nTWI     (_AC(1, UL) << 16)      /* Don't trap EL0 WFI to EL1 */
+#define SCTLR_nTWE     (_AC(1, UL) << 18)      /* Don't trap EL0 WFE to EL1 */
+#define SCTLR_WXN      (_AC(1, UL) << 19)      /* Write permission implies XN */
+#define SCTLR_EOE      (_AC(1, UL) << 24)      /* Endianness of data accesses at EL0 */
+#define SCTLR_EE       (_AC(1, UL) << 25)      /* Endianness of data accesses at EL1 */
+#define SCTLR_UCI      (_AC(1, UL) << 26)      /* Traps EL0 cache instructions to EL1 */
+
+/* Reserved, set to 1 */
+#define SCTLR_RES1_B11 (_AC(1, UL) << 11)
+#define SCTLR_RES1_B20 (_AC(1, UL) << 20)
+#define SCTLR_RES1_B22 (_AC(1, UL) << 22)
+#define SCTLR_RES1_B23 (_AC(1, UL) << 23)
+#define SCTLR_RES1_B28 (_AC(1, UL) << 28)
+#define SCTLR_RES1_B29 (_AC(1, UL) << 29)
+
+/* Reserved, set to 0 */
+#define SCTLR_RES0_B6  (_AC(1, UL) << 6)
+#define SCTLR_RES0_B10 (_AC(1, UL) << 10)
+#define SCTLR_RES0_B13 (_AC(1, UL) << 13)
+#define SCTLR_RES0_B17 (_AC(1, UL) << 17)
+#define SCTLR_RES0_B21 (_AC(1, UL) << 21)
+#define SCTLR_RES0_B27 (_AC(1, UL) << 27)
+#define SCTLR_RES0_B30 (_AC(1, UL) << 30)
+#define SCTLR_RES0_B31 (_AC(1, UL) << 31)
+
+/* Bits to set */
+#define SCTLR_SET_BITS \
+               (SCTLR_UCI | SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | \
+               SCTLR_DZE | SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | \
+               SCTLR_C | SCTLR_M | SCTLR_CP15BEN | SCTLR_RES1_B11 | \
+               SCTLR_RES1_B20 | SCTLR_RES1_B22 | SCTLR_RES1_B23 | \
+               SCTLR_RES1_B28 | SCTLR_RES1_B29)
+
+/* Bits to clear */
+#define SCTLR_CLEAR_BITS \
+               (SCTLR_EE | SCTLR_EOE | SCTLR_WXN | SCTLR_UMA | \
+               SCTLR_ITD | SCTLR_A | SCTLR_RES0_B6 | SCTLR_RES0_B10 | \
+               SCTLR_RES0_B13 | SCTLR_RES0_B17 | SCTLR_RES0_B21 | \
+               SCTLR_RES0_B27 | SCTLR_RES0_B30 | SCTLR_RES0_B31)
+
 /*
  * Definitions for Block and Page descriptor attributes
  */
diff --git a/plat/kvm/arm/entry64.S b/plat/kvm/arm/entry64.S
index d054e5c..fdbd295 100644
--- a/plat/kvm/arm/entry64.S
+++ b/plat/kvm/arm/entry64.S
@@ -34,6 +34,7 @@
 #include <uk/arch/limits.h>
 #include <asm.h>
 #include <mm.h>
+#include <arm/cpu_defs.h>
 
 .data
 .globl _dtb
@@ -68,10 +69,32 @@ ENTRY(_libkvmplat_entry)
 
        mov sp, x27
 
-       /* Setup excetpion vector table address before enable MMU */
+       /*
+        * Disable the MMU. We may have entered the kernel with it on and
+        * will need to update the tables later. If this has been set up
+        * with anything other than a VA == PA map then this will fail,
+        * but in this case the code to find where we are running from
+        * would have also failed.
+        */
+       dsb sy
+       mrs x2, sctlr_el1
+       bic x2, x2, #SCTLR_M
+       msr sctlr_el1, x2
+       isb
+
+       /* Set the context id */
+       msr contextidr_el1, xzr
+
+       /* Create pagetables that provide a PA == VA mapping */
+       bl create_pagetables
+
+       /* Set up the exception vector table address before enabling the MMU */
        ldr x29, =vector_table
        msr VBAR_EL1, x29
 
+       /* Enable the MMU */
+       bl start_mmu
+
        /* Load dtb address to x0 as a parameter */
        ldr x0, =_dtb
        b _libkvmplat_start
diff --git a/plat/kvm/arm/pagetable.S b/plat/kvm/arm/pagetable.S
index 4c8419f..ee4cc43 100644
--- a/plat/kvm/arm/pagetable.S
+++ b/plat/kvm/arm/pagetable.S
@@ -182,6 +182,62 @@ ENTRY(create_pagetables)
        ret
 END(create_pagetables)
 
+ENTRY(start_mmu)
+       /*
+        * Use dsb here to guarantee that the writes from
+        * create_pagetables have completed.
+        */
+       dsb sy
+
+       /* Load ttbr0; the pagetable starts at _end */
+       ldr x27, =_end
+       msr ttbr0_el1, x27
+       isb
+
+       /* Clear the Monitor Debug System control register */
+       msr mdscr_el1, xzr
+
+       /* Invalidate the TLB to avoid stale entries */
+       tlbi vmalle1
+       dsb nsh
+
+       ldr x2, =MAIR_INIT_ATTR
+       msr mair_el1, x2
+
+       /*
+        * Set up TCR according to the PARange bits from ID_AA64MMFR0_EL1.
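+        * PARange is reported in ID_AA64MMFR0_EL1[3:0]; the bfi below
+        * copies its low bits into TCR_EL1.IPS (bits [34:32]).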
+        */
+       ldr x2, =TCR_INIT_FLAGS
+       mrs x3, id_aa64mmfr0_el1
+       bfi x2, x3, #32, #3
+       msr tcr_el1, x2
+
+       /* Save lr, as the bl below clobbers x30 */
+       mov x22, x30
+
+       /*
+        * Invalidate the data cache to avoid using stale data that may
+        * still be in the dcache.
+        */
+       mov x0, #DCISW
+       bl dcsw_op_all
+
+       /* Set up SCTLR */
+       ldr x2, =SCTLR_SET_BITS
+       ldr x3, =SCTLR_CLEAR_BITS
+       mrs x1, sctlr_el1
+       bic x1, x1, x3  /* Clear the required bits */
+       orr x1, x1, x2  /* Set the required bits */
+       msr sctlr_el1, x1
+       isb
+
+       /* Restore lr */
+       mov x30, x22
+
+       ret
+END(start_mmu)
+
 /*
  * Builds an L0 -> L1 table descriptor
  *
-- 
2.17.1

