
Re: [PATCH v6 08/13] xen/arm: Fold mmu_init_secondary_cpu() to head.S



Hi Henry,

On 28/08/2023 02:32, Henry Wang wrote:
Currently mmu_init_secondary_cpu() only enforces that the page tables
do not contain mappings that are both Writable and eXecutable after
boot. To ease the arch/arm/mm.c split work, fold this function into
head.S.

Introduce an assembly macro pt_enforce_wxn for both arm32 and arm64.
For arm64, the macro is called at the end of enable_secondary_cpu_mm().
For arm32, the macro is called before secondary CPUs jump into
the C world.

Signed-off-by: Henry Wang <Henry.Wang@xxxxxxx>
---
v6:
- New patch.
---
  xen/arch/arm/arm32/head.S     | 20 ++++++++++++++++++++
  xen/arch/arm/arm64/mmu/head.S | 21 +++++++++++++++++++++
  xen/arch/arm/include/asm/mm.h |  2 --
  xen/arch/arm/mm.c             |  6 ------
  xen/arch/arm/smpboot.c        |  2 --
  5 files changed, 41 insertions(+), 10 deletions(-)
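For reference, the C helper being folded away is roughly the following
(sketched from the pre-split xen/arch/arm/mm.c; the exact code may be
split across helpers and differ in detail):

    /* MMU setup for secondary CPUs (which already have paging enabled) */
    void mmu_init_secondary_cpu(void)
    {
        /*
         * Setting SCTLR_EL2.WXN makes every writable mapping also
         * eXecute-never. The TLBs may cache this bit, hence the isb()
         * and TLB flush after the write.
         */
        WRITE_SYSREG(READ_SYSREG(SCTLR_EL2) | SCTLR_Axx_ELx_WXN, SCTLR_EL2);
        isb();
        flush_xen_tlb_local();
    }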

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 33b038e7e0..39218cf15f 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -83,6 +83,25 @@
          isb
  .endm
+/*
+ * Enforce that the Xen page-tables do not contain mappings that are
+ * both Writable and eXecutable.
+ *
+ * This should be called on each secondary CPU.
+ */
+.macro pt_enforce_wxn tmp
+        mrc   CP32(\tmp, HSCTLR)
+        orr   \tmp, \tmp, #SCTLR_Axx_ELx_WXN
+        dsb
+        mcr   CP32(\tmp, HSCTLR)
+        /*
+         * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
+         * before flushing the TLBs.
+         */
+        isb
+        flush_xen_tlb_local \tmp
+.endm
+
  /*
   * Common register usage in this file:
   *   r0  -
@@ -254,6 +273,7 @@ secondary_switched:
          /* Use a virtual address to access the UART. */
          mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
  #endif
+        pt_enforce_wxn r0
          PRINT("- Ready -\r\n")
          /* Jump to C world */
          mov_w r2, start_secondary
diff --git a/xen/arch/arm/arm64/mmu/head.S b/xen/arch/arm/arm64/mmu/head.S
index a5271e3880..25028bdf07 100644
--- a/xen/arch/arm/arm64/mmu/head.S
+++ b/xen/arch/arm/arm64/mmu/head.S
@@ -31,6 +31,25 @@
          isb
  .endm
+/*
+ * Enforce that the Xen page-tables do not contain mappings that are
+ * both Writable and eXecutable.
+ *
+ * This should be called on each secondary CPU.
+ */
+.macro pt_enforce_wxn tmp
+       mrs   \tmp, SCTLR_EL2
+       orr   \tmp, \tmp, #SCTLR_Axx_ELx_WXN
+       dsb   sy
+       msr   SCTLR_EL2, \tmp
+       /*
+        * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
+        * before flushing the TLBs.
+        */
+       isb
+       flush_xen_tlb_local
+.endm
+

It would be preferable if we can set the flag right when the MMU is enabled. This would avoid the extra TLB flush and SCTLR dance. How about the following (not compiled/cleaned) code:

diff --git a/xen/arch/arm/arm64/mmu/head.S b/xen/arch/arm/arm64/mmu/head.S
index a5271e388071..6b19d15ff89f 100644
--- a/xen/arch/arm/arm64/mmu/head.S
+++ b/xen/arch/arm/arm64/mmu/head.S
@@ -264,10 +264,11 @@ ENDPROC(create_page_tables)
  * Inputs:
  *   x0 : Physical address of the page tables.
  *
- * Clobbers x0 - x4
+ * Clobbers x0 - x6
  */
 enable_mmu:
         mov   x4, x0
+        mov   x5, x1
         PRINT("- Turning on paging -\r\n")

         /*
@@ -283,6 +284,7 @@ enable_mmu:
         mrs   x0, SCTLR_EL2
         orr   x0, x0, #SCTLR_Axx_ELx_M  /* Enable MMU */
         orr   x0, x0, #SCTLR_Axx_ELx_C  /* Enable D-cache */
+        orr   x0, x0, x5                /* Enable extra flags */
         dsb   sy                     /* Flush PTE writes and finish reads */
         msr   SCTLR_EL2, x0          /* now paging is enabled */
         isb                          /* Now, flush the icache */
@@ -297,16 +299,17 @@ ENDPROC(enable_mmu)
  * Inputs:
  *   lr : Virtual address to return to.
  *
- * Clobbers x0 - x5
+ * Clobbers x0 - x6
  */
 ENTRY(enable_secondary_cpu_mm)
-        mov   x5, lr
+        mov   x6, lr

         load_paddr x0, init_ttbr
         ldr   x0, [x0]

+        mov   x1, #SCTLR_Axx_ELx_WXN        /* Enable WxN from the start */
         bl    enable_mmu
-        mov   lr, x5
+        mov   lr, x6

         /* Return to the virtual address requested by the caller. */
         ret
@@ -320,16 +323,17 @@ ENDPROC(enable_secondary_cpu_mm)
  * Inputs:
  *   lr : Virtual address to return to.
  *
- * Clobbers x0 - x5
+ * Clobbers x0 - x6
  */
 ENTRY(enable_boot_cpu_mm)
-        mov   x5, lr
+        mov   x6, lr

         bl    create_page_tables
         load_paddr x0, boot_pgtable

+        mov   x1, #0        /* No extra SCTLR flags */
         bl    enable_mmu
-        mov   lr, x5
+        mov   lr, x6

         /*
          * The MMU is turned on and we are in the 1:1 mapping. Switch

The same logic could be used for arm32.
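Roughly (again not compiled, and the register allocation would need to
be checked against the actual arm32 enable_mmu), the arm32 counterpart
could look like:

        /* Caller passes the extra HSCTLR flags in r1 */
enable_mmu:
        mrc   CP32(r0, HSCTLR)
        orr   r0, r0, #SCTLR_Axx_ELx_M   /* Enable MMU */
        orr   r0, r0, #SCTLR_Axx_ELx_C   /* Enable D-cache */
        orr   r0, r0, r1                 /* Enable extra flags (e.g. WXN) */
        dsb                              /* Flush PTE writes, finish reads */
        mcr   CP32(r0, HSCTLR)           /* Now paging is enabled */
        isb                              /* Flush the icache */

with the secondary CPU path doing "mov r1, #SCTLR_Axx_ELx_WXN" before
the call and the boot CPU path doing "mov r1, #0", mirroring the arm64
hunks above. This would likewise avoid the separate pt_enforce_wxn
step on arm32.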

Cheers,

--
Julien Grall
