|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v1 5/8] xen/riscv: introduce identity mapping
The way the switch to virtual addresses was implemented in
commit e66003e7be ("xen/riscv: introduce setup_initial_pages")
wasn't safe enough, so an identity mapping was introduced and
used.
Fixes: e66003e7be ("xen/riscv: introduce setup_initial_pages")
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
xen/arch/riscv/include/asm/mm.h | 3 +-
xen/arch/riscv/mm.c | 99 ++++++++++++++++++++++-----------
xen/arch/riscv/riscv64/head.S | 30 ++++++++++
xen/arch/riscv/setup.c | 14 +----
4 files changed, 99 insertions(+), 47 deletions(-)
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 996041ce81..500fdc9c5a 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -9,7 +9,8 @@
void setup_initial_pagetables(void);
void enable_mmu(void);
-void cont_after_mmu_is_enabled(void);
+
+void remove_identity_mapping(void);
void calc_phys_offset(void);
diff --git a/xen/arch/riscv/mm.c b/xen/arch/riscv/mm.c
index c092897f9a..ab790f571d 100644
--- a/xen/arch/riscv/mm.c
+++ b/xen/arch/riscv/mm.c
@@ -24,6 +24,11 @@ static unsigned long phys_offset;
#define LOAD_TO_LINK(addr) ((unsigned long)(addr) - phys_offset)
#define LINK_TO_LOAD(addr) ((unsigned long)(addr) + phys_offset)
+/*
+ * Should be removed as soon as enough headers are merged for inclusion of
+ * <xen/lib.h>.
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
/*
* It is expected that Xen won't be more than 2 MB.
@@ -35,8 +40,10 @@ static unsigned long phys_offset;
*
* One more page table might be needed in case the Xen load address
* isn't 2 MB aligned.
+ *
+ * 3 additional page tables are needed for identity mapping.
*/
-#define PGTBL_INITIAL_COUNT ((CONFIG_PAGING_LEVELS - 1) + 1)
+#define PGTBL_INITIAL_COUNT ((CONFIG_PAGING_LEVELS - 1) + 1 + 3)
pte_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
stage1_pgtbl_root[PAGETABLE_ENTRIES];
@@ -75,6 +82,7 @@ static void __init setup_initial_mapping(struct mmu_desc
*mmu_desc,
unsigned int index;
pte_t *pgtbl;
unsigned long page_addr;
+ bool is_identity_mapping = (map_start == pa_start);
if ( (unsigned long)_start % XEN_PT_LEVEL_SIZE(0) )
{
@@ -108,16 +116,18 @@ static void __init setup_initial_mapping(struct mmu_desc
*mmu_desc,
{
unsigned long paddr = (page_addr - map_start) + pa_start;
unsigned int permissions = PTE_LEAF_DEFAULT;
+ unsigned long addr = (is_identity_mapping) ?
+ page_addr : LINK_TO_LOAD(page_addr);
pte_t pte_to_be_written;
index = pt_index(0, page_addr);
- if ( is_kernel_text(LINK_TO_LOAD(page_addr)) ||
- is_kernel_inittext(LINK_TO_LOAD(page_addr)) )
- permissions =
- PTE_EXECUTABLE | PTE_READABLE | PTE_VALID;
+ if ( is_kernel_text(addr) ||
+ is_kernel_inittext(addr) )
+ permissions =
+ PTE_EXECUTABLE | PTE_READABLE | PTE_VALID;
- if ( is_kernel_rodata(LINK_TO_LOAD(page_addr)) )
+ if ( is_kernel_rodata(addr) )
permissions = PTE_READABLE | PTE_VALID;
pte_to_be_written = paddr_to_pte(paddr, permissions);
@@ -232,22 +242,27 @@ void __init setup_initial_pagetables(void)
linker_start,
linker_end,
load_start);
+
+ if ( linker_start == load_start )
+ return;
+
+ setup_initial_mapping(&mmu_desc,
+ load_start,
+ load_start + PAGE_SIZE,
+ load_start);
+
+ setup_initial_mapping(&mmu_desc,
+ (unsigned long)cpu0_boot_stack,
+ (unsigned long)cpu0_boot_stack + PAGE_SIZE,
+ (unsigned long)cpu0_boot_stack);
}
-void __init noreturn noinline enable_mmu()
+/*
+ * enable_mmu() can't be __init because the __init section isn't part of the
+ * identity mapping, so it would cause an issue after the MMU is enabled.
+ */
+void enable_mmu(void)
{
- /*
- * Calculate a linker time address of the mmu_is_enabled
- * label and update CSR_STVEC with it.
- * MMU is configured in a way where linker addresses are mapped
- * on load addresses so in a case when linker addresses are not equal
- * to load addresses, after MMU is enabled, it will cause
- * an exception and jump to linker time addresses.
- * Otherwise if load addresses are equal to linker addresses the code
- * after mmu_is_enabled label will be executed without exception.
- */
- csr_write(CSR_STVEC, LOAD_TO_LINK((unsigned long)&&mmu_is_enabled));
-
/* Ensure page table writes precede loading the SATP */
sfence_vma();
@@ -255,25 +270,41 @@ void __init noreturn noinline enable_mmu()
csr_write(CSR_SATP,
PFN_DOWN((unsigned long)stage1_pgtbl_root) |
RV_STAGE1_MODE << SATP_MODE_SHIFT);
+}
+
+void __init remove_identity_mapping(void)
+{
+ int i, j;
+ pte_t *pgtbl;
+ unsigned int index, xen_index;
- asm volatile ( ".p2align 2" );
- mmu_is_enabled:
/*
- * Stack should be re-inited as:
- * 1. Right now an address of the stack is relative to load time
- * addresses what will cause an issue in case of load start address
- * isn't equal to linker start address.
- * 2. Addresses in stack are all load time relative which can be an
- * issue in case when load start address isn't equal to linker
- * start address.
- *
- * We can't return to the caller because the stack was reseted
- * and it may have stash some variable on the stack.
- * Jump to a brand new function as the stack was reseted
+ * id_addrs should be kept in sync with the identity mapping set up in
+ * setup_initial_pagetables()
*/
+ unsigned long id_addrs[] = {
+ LINK_TO_LOAD(_start),
+ LINK_TO_LOAD(cpu0_boot_stack),
+ };
- switch_stack_and_jump((unsigned long)cpu0_boot_stack + STACK_SIZE,
- cont_after_mmu_is_enabled);
+ pgtbl = stage1_pgtbl_root;
+
+ for ( j = 0; j < ARRAY_SIZE(id_addrs); j++ )
+ {
+ for ( pgtbl = stage1_pgtbl_root, i = CONFIG_PAGING_LEVELS - 1; i >= 0;
i-- )
+ {
+ index = pt_index(i, id_addrs[j]);
+ xen_index = pt_index(i, XEN_VIRT_START);
+
+ if ( index != xen_index )
+ {
+ pgtbl[index].pte = 0;
+ break;
+ }
+
+ pgtbl = &pgtbl[index];
+ }
+ }
}
/*
diff --git a/xen/arch/riscv/riscv64/head.S b/xen/arch/riscv/riscv64/head.S
index 69f3a24987..582078798a 100644
--- a/xen/arch/riscv/riscv64/head.S
+++ b/xen/arch/riscv/riscv64/head.S
@@ -31,6 +31,36 @@ ENTRY(start)
jal calc_phys_offset
+ jal setup_initial_pagetables
+
+ jal enable_mmu
+
+ /*
+ * Calculate physical offset
+ *
+ * We can't re-use the value of the phys_offset variable here, as
+ * phys_offset is located in .bss; that section isn't 1:1 mapped,
+ * so accessing it would cause an MMU fault
+ */
+ li t0, XEN_VIRT_START
+ la t1, start
+ sub t1, t1, t0
+
+ /* Calculate proper VA after jump from 1:1 mapping */
+ la t0, .L_primary_switched
+ sub t0, t0, t1
+
+ /* Jump from 1:1 mapping world */
+ jr t0
+
+.L_primary_switched:
+ /*
+ * The cpu0_boot_stack address is relative to the 1:1 mapping, so it
+ * must be recalculated after jumping out of the 1:1-mapped world,
+ * as the 1:1 mapping will be removed soon in start_xen().
+ */
+ jal reset_stack
+
tail start_xen
ENTRY(reset_stack)
diff --git a/xen/arch/riscv/setup.c b/xen/arch/riscv/setup.c
index 845d18d86f..c4ef0b3165 100644
--- a/xen/arch/riscv/setup.c
+++ b/xen/arch/riscv/setup.c
@@ -11,20 +11,10 @@ unsigned char __initdata cpu0_boot_stack[STACK_SIZE]
void __init noreturn start_xen(unsigned long bootcpu_id,
paddr_t dtb_addr)
{
- early_printk("Hello from C env\n");
-
- setup_initial_pagetables();
-
- enable_mmu();
-
- for ( ;; )
- asm volatile ("wfi");
+ remove_identity_mapping();
- unreachable();
-}
+ early_printk("Hello from C env\n");
-void __init noreturn cont_after_mmu_is_enabled(void)
-{
early_printk("All set up\n");
for ( ;; )
--
2.40.1
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |