
[Minios-devel] [PATCH v3 06/43] arm64: add the boot code



This patch adds the boot code for arm64:
    1.) add the header which contains the macros used to set up the page tables
    2.) initialize the MAIR/TCR for a 48-bit virtual address space
    3.) set up the page tables for the code section
    4.) enable the MMU

This patch refers to Chen Baozi's patch:
     "Initial codes for arm64"

Signed-off-by: Huang Shijie <shijie.huang@xxxxxxx>
---
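
For reviewers (not part of the commit message): below is a minimal,
illustrative C sketch of how a 48-bit virtual address is split into table
indices with a 4KiB granule, mirroring the l{0,1,2,3}_pgt_idx() macros and
the set_page_table entry construction added by this patch. The example
address and the printed values are for illustration only.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define L0_SHIFT     39                   /* 512GiB per level-0 entry */
#define L1_SHIFT     30                   /* 1GiB   per level-1 entry */
#define L2_SHIFT     21                   /* 2MiB   per level-2 entry */
#define L3_SHIFT     12                   /* 4KiB   per level-3 entry */
#define Ln_ADDR_MASK ((1UL << 9) - 1)     /* 512 entries per table    */

static uint64_t pgt_idx(uint64_t va, int shift)
{
    return (va >> shift) & Ln_ADDR_MASK;
}

int main(void)
{
    uint64_t va = 0x0000000040080000ULL;  /* arbitrary example VA */

    printf("L0=%llu L1=%llu L2=%llu L3=%llu\n",
           (unsigned long long)pgt_idx(va, L0_SHIFT),
           (unsigned long long)pgt_idx(va, L1_SHIFT),
           (unsigned long long)pgt_idx(va, L2_SHIFT),
           (unsigned long long)pgt_idx(va, L3_SHIFT));

    /*
     * A level-2 block entry as built by set_page_table: the page-aligned
     * output address ORed with the attribute bits in @prot (PT_MEM from
     * pagetable.h, i.e. AF | inner-shareable | normal memory | block,
     * which evaluates to 0x711).
     */
    uint64_t pa    = va;                  /* pretend VA == PA here */
    uint64_t prot  = 0x711;
    uint64_t entry = ((pa >> PAGE_SHIFT) << PAGE_SHIFT) | prot;

    printf("L2 entry = 0x%016llx\n", (unsigned long long)entry);
    return 0;
}
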
 arch/arm/arm64/arm64.S        | 286 ++++++++++++++++++++++++++++++++++++++++++
 arch/arm/arm64/asm.h          |  18 +++
 include/arm/arm64/pagetable.h | 108 ++++++++++++++++
 3 files changed, 412 insertions(+)
 create mode 100644 arch/arm/arm64/arm64.S
 create mode 100644 arch/arm/arm64/asm.h
 create mode 100644 include/arm/arm64/pagetable.h

diff --git a/arch/arm/arm64/arm64.S b/arch/arm/arm64/arm64.S
new file mode 100644
index 0000000..b454cc6
--- /dev/null
+++ b/arch/arm/arm64/arm64.S
@@ -0,0 +1,286 @@
+#include "asm.h"
+#include <arch_limits.h>
+#include <arm64/pagetable.h>
+#include <xen/xen.h>
+
+/* This macro clobbers x0/x1/x2/x16. */
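+/*
+ * Xen arm64 hypercall convention: the hypercall number is passed in x16
+ * and the arguments in x0..x4.  Here x0 = CONSOLEIO_write, x1 = string
+ * length and x2 = string address for __HYPERVISOR_console_io.
+ */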
+#define PRINT(_s)                              \
+    adr     x2, 97f;                           \
+    adr     x1, 98f;                           \
+    sub     x1, x1, x2;                        \
+    mov     x0, #CONSOLEIO_write;              \
+    mov     x16, #__HYPERVISOR_console_io;     \
+    hvc     #XEN_HYPERCALL_TAG;                \
+    b       99f;                               \
+97: .asciz _s;                                 \
+98: ;                                          \
+    .align  2;                                 \
+99:                                            \
+
+    .data
+    .globl _boot_stack
+    .globl boot_l0_pgtable
+    .globl boot_l1_pgtable
+    .globl boot_l2_pgtable
+    .globl idmap_l0_pgtable
+    .globl idmap_l1_pgtable
+
+    .align 12
+boot_l0_pgtable:
+    .fill  PAGE_SIZE,1,0
+boot_l1_pgtable:
+    .fill  PAGE_SIZE,1,0
+boot_l2_pgtable:
+    .fill  PAGE_SIZE,1,0
+idmap_l0_pgtable:
+    .fill  PAGE_SIZE,1,0
+idmap_l1_pgtable:
+    .fill  PAGE_SIZE,1,0
+
+    .align 12
+_boot_stack:
+    .fill  __STACK_SIZE,1,0
+stack_end:
+
+/*
+ * Kernel startup entry point.
+ *
+ * Please refer to the Linux kernel file Documentation/arm64/booting.txt
+ * for the header format.
+ */
+    .text
+
+    b       _start                  /* branch to kernel start, magic */
+    .long   0                       /* reserved */
+    .quad   0x0                     /* Image load offset from start of RAM */
+    .quad   _end - _start           /* Effective Image size */
+    .quad   2                       /* kernel flags: LE, 4K page size */
+    .quad   0                       /* reserved */
+    .quad   0                       /* reserved */
+    .quad   0                       /* reserved */
+    .byte   0x41                    /* Magic number, "ARM\x64" */
+    .byte   0x52
+    .byte   0x4d
+    .byte   0x64
+    .long   0                       /* reserved */
+
+/*
+ * Primary CPU general-purpose register settings
+ * x0 = physical address of device tree blob (dtb) in system RAM.
+ * x1 = 0 (reserved for future use)
+ * x2 = 0 (reserved for future use)
+ * x3 = 0 (reserved for future use)
+ *
+ * The registers used by _start:
+ * x20 - FDT pointer
+ * x22 - offset between PA and VA
+ */
+ENTRY(_start)
+    /* Save the FDT pointer */
+    mov     x20, x0
+
+    /* Calculate where we are */
+    bl      _calc_offset
+
+    PRINT("- Mini-OS booting -\n")
+
+    PRINT("- Setup CPU -\n")
+    /* Setup CPU for turning on the MMU. */
+    bl      _setup_cpu
+
+    PRINT("- Setup booting pagetable -\n")
+    /* Setup the initial page table. */
+    bl      _setup_initial_pgtable
+    mov     x19, x0
+
+    /* Setup the identity mapping */
+    bl      _setup_idmap_pgtable
+
+    /* Load TTBRx */
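+    /* x19 = boot page tables (kernel VAs, TTBR1_EL1),
+     * x0  = identity-map page tables (TTBR0_EL1), set up above. */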
+    msr     ttbr1_el1, x19
+    msr     ttbr0_el1, x0
+    isb
+
+    /* Turning on MMU */
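+    /*
+     * Invalidate any stale TLB entries, wait for completion, then set
+     * SCTLR_EL1.{M,C,I} to enable the MMU and the D-/I-caches.
+     */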
+    tlbi    vmalle1
+    dsb     nsh
+    isb
+    ldr     x1, =(SCTLR_M | SCTLR_C | SCTLR_I)
+    msr     sctlr_el1, x1
+    isb
+
+    PRINT("- MMU on -\n")
+    ldr     x0, =mmu_on
+    br      x0
+
+mmu_on:
+    /* Stop using TTBR0_EL1: disable its translation table walks */
+    mrs     x19, tcr_el1
+    orr     x19, x19, #TCR_EPD0
+    msr     tcr_el1, x19
+
+    /* Setup stack */
+    PRINT("- Setup stack -\n")
+    ldr     x1, =stack_end
+    mov     sp, x1
+
+    PRINT("- Jumping to C entry -\n")
+    mov     x0, x20                  /* x0 <- device tree (physical address) */
+    mov     x1, x22                  /* x1 <- phys_offset */
+
+    b      arch_init
+ENDPROC(_start)
+
+/*
+ * Get the phys-offset, and save it in x22
+ */
+_calc_offset:
+    ldr     x22, =_start             /* x22 := vaddr(_start) */
+    adr     x21, _start              /* x21 := paddr(_start) */
+    sub     x22, x21, x22            /* x22 := phys-offset (paddr - vaddr) */
+    ret
+
+/*
+ * Setup the memory region attributes (MAIR_EL1) and the
+ * translation control register (TCR_EL1).
+ */
+_setup_cpu:
+    /*
+     * Setup memory attribute type tables
+     *
+     * Memory region attributes for LPAE:
+     *
+     *   n = AttrIndx[2:0]
+     *                      n       MAIR
+     *   DEVICE_nGnRnE      000     00000000 (0x00)
+     *   DEVICE_nGnRE       001     00000100 (0x04)
+     *   DEVICE_GRE         010     00001100 (0x0c)
+     *   NORMAL_NC          011     01000100 (0x44)
+     *   NORMAL             100     11111111 (0xff)
+     */
+    ldr     x0, =(SET_MAIR(0x00, MEM_DEVICE_nGnRnE) | \
+                  SET_MAIR(0x04, MEM_DEVICE_nGnRE)  | \
+                  SET_MAIR(0x0c, MEM_DEVICE_GRE)    | \
+                  SET_MAIR(0x44, MEM_NORMAL_NC)     | \
+                  SET_MAIR(0xff, MEM_NORMAL))
+    msr     mair_el1, x0
+
+    /*
+     * Setup the translation control register (TCR):
+     *   T0SZ = T1SZ = 64 - VA_BITS = 16  -> 48-bit VAs for both TTBRs,
+     *   4KiB granule, 48-bit IPS, 16-bit ASIDs, write-back write-allocate
+     *   cacheable and inner-shareable translation table walks.
+     */
+    ldr     x0, =(TCR_TxSZ(VA_BITS) | TCR_ASID16 | TCR_TG1_4K | \
+                  TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED | TCR_IPS_48BIT)
+    msr     tcr_el1, x0
+
+    ret
+
+/*
+ * Setup the page table mapping for @addr at @level with @prot.
+ *
+ * Note: x22 stores the offset between virtual address and physical address.
+ */
+.macro set_page_table, addr, level, prot
+    /* Find the table index in @level, save it in x3  */
+.if \level == 0
+    lsr     x3, \addr, #L0_SHIFT
+    adr     x8, boot_l1_pgtable
+    adr     x11, boot_l0_pgtable
+.endif
+
+.if \level == 1
+    lsr     x3, \addr, #L1_SHIFT
+    adr     x8, boot_l2_pgtable
+    adr     x11, boot_l1_pgtable
+.endif
+
+.if \level == 2
+    lsr     x3, \addr, #L2_SHIFT
+    adr     x11, boot_l2_pgtable
+    /* Get the physical address; @addr should be 2MiB aligned. */
+    add     x8, \addr, x22
+.endif
+
+    and     x3, x3, #Ln_ADDR_MASK
+
+    /* Build the page table entry */
+    ldr     x7, = \prot
+    lsr     x9, x8, #PAGE_SHIFT
+    orr     x7, x7, x9, lsl #PAGE_SHIFT
+
+    /* Store entry */
+    str     x7, [x11, x3, lsl #3]
+.endm
+
+/*
+ * Setup the boot page tables mapping the code section.
+ *
+ * => none
+ * <= x0 : the page table pointer for TTBR1_EL1
+ */
+_setup_initial_pgtable:
+    /* Start to map the code */
+    ldr     x0, =_text                 /* x0 := vaddr(_text)            */
+    ldr     x1, =_end                  /* x1 := vaddr(_end)             */
+
+    set_page_table x0, 0, PT_PT
+    set_page_table x0, 1, PT_PT
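+
+    /* Map [_text, _end) with 2MiB level-2 blocks, one per iteration. */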
+1:
+    set_page_table x0, 2, PT_MEM
+
+    add     x0, x0, L2_SIZE
+    cmp     x1, x0
+    b.gt    1b
+
+    adr     x0, boot_l0_pgtable
+    dsb     sy
+    ret
+
+/*
+ * Setup the page table mapping for @addr at @level with @prot.
+ *
+ * Only used for identity mapping.
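+ * Unlike set_page_table, @addr is already a physical address, so the
+ * phys-offset in x22 is not applied.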
+ */
+.macro set_ident_page_table, addr, level, prot
+    /* Find the table index in @level, save it in x3  */
+.if \level == 0
+    lsr     x3, \addr, #L0_SHIFT
+    adr     x8, idmap_l1_pgtable
+    adr     x11, idmap_l0_pgtable
+.endif
+
+.if \level == 1
+    lsr     x3, \addr, #L1_SHIFT
+    mov     x8, \addr
+    adr     x11, idmap_l1_pgtable
+.endif
+
+    and     x3, x3, #Ln_ADDR_MASK
+
+    /* Build the page table entry */
+    ldr     x7, = \prot
+    lsr     x9, x8, #PAGE_SHIFT
+    orr     x7, x7, x9, lsl #PAGE_SHIFT
+
+    /* Store entry */
+    str     x7, [x11, x3, lsl #3]
+.endm
+
+/*
+ * Setup the page table for TTBR0_EL1:
+ *   Create an identity (VA == PA) mapping of the code section.
+ *   With a 48-bit address space only level 0 and level 1 are used
+ *   (a 1GiB level-1 block); levels 2 and 3 are not needed.
+ *
+ * => none
+ * <= x0 : the page table pointer for TTBR0_EL1
+ */
+_setup_idmap_pgtable:
+    /* Create the VA = PA map */
+    adr     x0, _text
+
+    set_ident_page_table x0, 0, PT_PT
+    set_ident_page_table x0, 1, PT_MEM
+
+    adr     x0, idmap_l0_pgtable
+    dsb     sy
+    ret
diff --git a/arch/arm/arm64/asm.h b/arch/arm/arm64/asm.h
new file mode 100644
index 0000000..3a498c4
--- /dev/null
+++ b/arch/arm/arm64/asm.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_H__
+#define __ASM_H__
+
+#define ALIGN   .align 4
+
+#define ENTRY(name) \
+    .globl name; \
+    ALIGN; \
+    name:
+
+#define END(name) \
+    .size name, .-name
+
+#define ENDPROC(name) \
+    .type name, @function; \
+    END(name)
+
+#endif /* __ASM_H__ */
diff --git a/include/arm/arm64/pagetable.h b/include/arm/arm64/pagetable.h
new file mode 100644
index 0000000..8e2384f
--- /dev/null
+++ b/include/arm/arm64/pagetable.h
@@ -0,0 +1,108 @@
+#ifndef __ARM64_PAGE_TABLE__
+
+#define __ARM64_PAGE_TABLE__
+
+/* TCR flags */
+#define TCR_TxSZ(x)         ((((64) - (x)) << 16) | (((64) - (x)) << 0))
+#define TCR_IRGN_WBWA       (((1) << 8) | ((1) << 24))
+#define TCR_ORGN_WBWA       (((1) << 10) | ((1) << 26))
+#define TCR_SHARED          (((3) << 12) | ((3) << 28))
+#define TCR_ASID16          ((1) << 36)
+#define TCR_IPS_48BIT       ((5) << 32)
+#define TCR_TG1_4K          ((2) << 30)
+#define TCR_EPD0            (1 << 7)
+
+/* Max virtual address */
+#define VM_MAX_ADDRESS      (0xffffffffffffffff)
+
+/* Number of virtual address bits */
+#define VA_BITS             48
+
+/*
+ * Memory types available.
+ */
+#define MEM_DEVICE_nGnRnE    0
+#define MEM_DEVICE_nGnRE     1
+#define MEM_DEVICE_GRE       2
+#define MEM_NORMAL_NC        3
+#define MEM_NORMAL           4
+
+#define SET_MAIR(attr, mt)  ((attr) << ((mt) * 8))
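+/* e.g. SET_MAIR(0xff, MEM_NORMAL) places 0xff in byte 4 of MAIR_EL1. */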
+
+/* SCTLR_EL1 - System Control Register */
+#define SCTLR_M             (1 << 0)
+#define SCTLR_C             (1 << 2)
+#define SCTLR_I             (1 << 12)
+
+/* Level 0 table, 512GiB per entry */
+#define L0_SHIFT            39
+#define L0_SIZE             (1UL << L0_SHIFT)
+#define L0_INVAL            0x0 /* An invalid address */
+#define L0_TABLE            0x3 /* A next-level table */
+
+/* Level 1 table, 1GiB per entry */
+#define L1_SHIFT            30
+#define L1_SIZE             (1 << L1_SHIFT)
+#define L1_OFFSET           (L1_SIZE - 1)
+#define L1_INVAL            L0_INVAL
+#define L1_BLOCK            0x1
+#define L1_TABLE            L0_TABLE
+#define L1_MASK             (~(L1_SIZE-1))
+
+/* Level 2 table, 2MiB per entry */
+#define L2_SHIFT            21
+#define L2_SIZE             (1 << L2_SHIFT)
+#define L2_OFFSET           (L2_SIZE - 1)
+#define L2_INVAL            L0_INVAL
+#define L2_BLOCK            L1_BLOCK
+#define L2_TABLE            L0_TABLE
+#define L2_MASK             (~(L2_SIZE-1))
+
+/* Level 3 table, 4KiB per entry */
+#define L3_SHIFT            12
+#define L3_SIZE             (1 << L3_SHIFT)
+#define L3_OFFSET           (L3_SIZE - 1)
+#define L3_INVAL            0x0
+#define L3_PAGE             0x3
+#define L3_MASK             (~(L3_SIZE-1))
+
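+/* Each 4KiB table holds 512 eight-byte entries, i.e. 9 VA bits per level. */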
+#define Ln_ENTRIES          (1 << 9)
+#define Ln_ADDR_MASK        (Ln_ENTRIES - 1)
+
+#define ATTR_MASK_L         0xfff
+
+#define l0_pgt_idx(va)      (((va) >> L0_SHIFT) & Ln_ADDR_MASK)
+#define l1_pgt_idx(va)      (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
+#define l2_pgt_idx(va)      (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
+#define l3_pgt_idx(va)      (((va) >> L3_SHIFT) & Ln_ADDR_MASK)
+
+/*
+ * Lower attributes fields in Stage 1 VMSAv8-A Block and Page descriptor
+ */
+#define ATTR_nG            (1 << 11)
+#define ATTR_AF            (1 << 10)
+#define ATTR_SH(x)         ((x) << 8)
+#define ATTR_SH_MASK       ATTR_SH(3)
+#define ATTR_SH_NS         0               /* Non-shareable */
+#define ATTR_SH_OS         2               /* Outer-shareable */
+#define ATTR_SH_IS         3               /* Inner-shareable */
+#define ATTR_AP_RW_BIT     (1 << 7)
+#define ATTR_AP(x)         ((x) << 6)
+#define ATTR_AP_MASK       ATTR_AP(3)
+#define ATTR_AP_RW         (0 << 1)
+#define ATTR_AP_RO         (1 << 1)
+#define ATTR_AP_USER       (1 << 0)
+#define ATTR_NS            (1 << 5)
+#define ATTR_IDX(x)        ((x) << 2)
+#define ATTR_IDX_MASK      (7 << 2)
+
+#define MEM_DEF_ATTR       (ATTR_AF|ATTR_SH(ATTR_SH_IS)|ATTR_IDX(MEM_NORMAL))
+#define MEM_NC_ATTR        (ATTR_AF|ATTR_SH(ATTR_SH_IS)|ATTR_IDX(MEM_NORMAL_NC))
+#define MEM_DEV_ATTR       (ATTR_AF|ATTR_SH(ATTR_SH_IS)|ATTR_IDX(MEM_DEVICE_nGnRnE))
+
+#define MEM_RO_ATTR        (MEM_DEF_ATTR|ATTR_AP(ATTR_AP_RO))
+
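+/*
+ * PT_PT : a table descriptor pointing to the next-level table.
+ * PT_MEM: a block descriptor for normal, inner-shareable memory.
+ */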
+#define PT_PT              (L0_TABLE)
+#define PT_MEM             (MEM_DEF_ATTR | L1_BLOCK)
+
+#endif
-- 
2.7.4

