 
	
| [Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [PATCH v8 8/8] xen/arm: mmu: move MMU specific P2M code to mmu/p2m.{c,h}
 Hi Julien,
> On Nov 2, 2023, at 02:34, Julien Grall <julien@xxxxxxx> wrote:
> 
> Hi Henry,
> 
> On 23/10/2023 03:13, Henry Wang wrote:
>> From: Penny Zheng <penny.zheng@xxxxxxx>
>> Current P2M implementation is designed for MMU system only.
>> We move the MMU-specific codes into mmu/p2m.c, and only keep generic
>> codes in p2m.c, like VMID allocator, etc. We also move MMU-specific
>> definitions and declarations to mmu/p2m.h, such as p2m_tlb_flush_sync().
>> Also expose previously static functions p2m_vmid_allocator_init(),
>> p2m_alloc_vmid() for further MPU usage. Since with the code movement
>> p2m_free_vmid() is now used in two files, also expose p2m_free_vmid().
>> With the code movement, global variable max_vmid is used in multiple
>> files instead of a single file (and will be used in MPU P2M
>> implementation), declare it in the header and remove the "static" of
>> this variable.
>> Also, since p2m_invalidate_root() should be MMU only and after the
>> code movement the only caller of p2m_invalidate_root() outside of
>> mmu/p2m.c is arch_domain_creation_finished(), creating a new function
>> named p2m_domain_creation_finished() in mmu/p2m.c for the original
>> code in arch_domain_creation_finished(), and marking
>> p2m_invalidate_root() as static.
>> Take the opportunity to fix the incorrect coding style when possible.
>> When there is bit shift in macros, take the opportunity to add the
>> missing 'U' as a compliance of MISRA.
>> Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
>> Signed-off-by: Wei Chen <wei.chen@xxxxxxx>
>> Signed-off-by: Henry Wang <Henry.Wang@xxxxxxx>
> 
> Acked-by: Julien Grall <jgrall@xxxxxxxxxx>
Thanks!
> 
> I think the series is now fully acked. But I will wait for 4.18 to be 
> released before merging this series.
I think the third patch "xen/arm: Fold mmu_init_secondary_cpu() to head.S" will 
need the
double check from your side :)
Here is what I have locally, to save time I will just show the content here for 
you to check,
and I will push it in the next few days:
commit ba72d6dc17fd7ce9a863b9e00b06b33c069c7641
Author: Henry Wang <Henry.Wang@xxxxxxx>
Date:   Wed Aug 23 17:59:50 2023 +0800
    xen/arm: Fold mmu_init_secondary_cpu() to head.S
    Currently mmu_init_secondary_cpu() only enforces the page table
    should not contain mapping that are both Writable and eXecutables
    after boot. To ease the arch/arm/mm.c split work, fold this function
    to head.S.
    For arm32, the WXN bit cannot be set early because at the point when
    the MMU is enabled, the page-tables may still contain mapping which
    are writable and executable. Therefore, introduce an assembly macro
    pt_enforce_wxn. The macro is called before secondary CPUs jumping
    into the C world.
    For arm64, set the SCTLR_Axx_ELx_WXN flag right when the MMU is
    enabled. This would avoid the extra TLB flush and SCTLR dance.
    Signed-off-by: Henry Wang <Henry.Wang@xxxxxxx>
    Co-authored-by: Julien Grall <jgrall@xxxxxxxxxx>
    Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
    Signed-off-by: Ayan Kumar Halder <ayan.kumar.halder@xxxxxxx>
    ---
    v9:
    - Move pt_enforce_wxn() for arm32 up a few lines.
    - Add commit message explaining why WXN cannot be set early for arm32.
    - Correct in-code comment for enable_mmu().
    v8:
    - Change the setting of SCTLR_Axx_ELx_WXN for arm64 to set the
      flag right when the MMU is enabled.
    v7:
    - No change.
    v6:
    - New patch.
diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 33b038e7e0..2fea2a872a 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -83,6 +83,25 @@
         isb
 .endm
+/*
+ * Enforce Xen page-tables do not contain mapping that are both
+ * Writable and eXecutables.
+ *
+ * This should be called on each secondary CPU.
+ */
+.macro pt_enforce_wxn tmp
+        mrc   CP32(\tmp, HSCTLR)
+        orr   \tmp, \tmp, #SCTLR_Axx_ELx_WXN
+        dsb
+        mcr   CP32(\tmp, HSCTLR)
+        /*
+         * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
+         * before flushing the TLBs.
+         */
+        isb
+        flush_xen_tlb_local \tmp
+.endm
+
 /*
  * Common register usage in this file:
  *   r0  -
@@ -249,6 +268,7 @@ secondary_switched:
         dsb
         isb
         flush_xen_tlb_local r0
+        pt_enforce_wxn r0
 #ifdef CONFIG_EARLY_PRINTK
         /* Use a virtual address to access the UART. */
diff --git a/xen/arch/arm/arm64/mmu/head.S b/xen/arch/arm/arm64/mmu/head.S
index 88075ef083..34de5df0c7 100644
--- a/xen/arch/arm/arm64/mmu/head.S
+++ b/xen/arch/arm/arm64/mmu/head.S
@@ -263,11 +263,13 @@ ENDPROC(create_page_tables)
  *
  * Inputs:
  *   x0 : Physical address of the page tables.
+ *   x1 : Extra flags of the SCTLR.
  *
- * Clobbers x0 - x4
+ * Clobbers x0 - x5
  */
 enable_mmu:
         mov   x4, x0
+        mov   x5, x1
         PRINT("- Turning on paging -\r\n")
         /*
@@ -283,6 +285,7 @@ enable_mmu:
         mrs   x0, SCTLR_EL2
         orr   x0, x0, #SCTLR_Axx_ELx_M  /* Enable MMU */
         orr   x0, x0, #SCTLR_Axx_ELx_C  /* Enable D-cache */
+        orr   x0, x0, x5                /* Enable extra flags */
         dsb   sy                     /* Flush PTE writes and finish reads */
         msr   SCTLR_EL2, x0          /* now paging is enabled */
         isb                          /* Now, flush the icache */
@@ -297,16 +300,17 @@ ENDPROC(enable_mmu)
  * Inputs:
  *   lr : Virtual address to return to.
  *
- * Clobbers x0 - x5
+ * Clobbers x0 - x6
  */
 ENTRY(enable_secondary_cpu_mm)
-        mov   x5, lr
+        mov   x6, lr
         load_paddr x0, init_ttbr
         ldr   x0, [x0]
+        mov   x1, #SCTLR_Axx_ELx_WXN        /* Enable WxN from the start */
         bl    enable_mmu
-        mov   lr, x5
+        mov   lr, x6
         /* Return to the virtual address requested by the caller. */
         ret
@@ -320,14 +324,15 @@ ENDPROC(enable_secondary_cpu_mm)
  * Inputs:
  *   lr : Virtual address to return to.
  *
- * Clobbers x0 - x5
+ * Clobbers x0 - x6
  */
 ENTRY(enable_boot_cpu_mm)
-        mov   x5, lr
+        mov   x6, lr
         bl    create_page_tables
         load_paddr x0, boot_pgtable
+        mov   x1, #0        /* No extra SCTLR flags */
         bl    enable_mmu
         /*
@@ -337,7 +342,7 @@ ENTRY(enable_boot_cpu_mm)
         ldr   x0, =1f
         br    x0
 1:
-        mov   lr, x5
+        mov   lr, x6
         /*
          * The 1:1 map may clash with other parts of the Xen virtual memory
          * layout. As it is not used anymore, remove it completely to
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index d25e59f828..163d22ecd3 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -214,8 +214,6 @@ extern void remove_early_mappings(void);
 /* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to 
the
  * new page table */
 extern int init_secondary_pagetables(int cpu);
-/* Switch secondary CPUS to its own pagetables and finalise MMU setup */
-extern void mmu_init_secondary_cpu(void);
 /*
  * For Arm32, set up the direct-mapped xenheap: up to 1GB of contiguous,
  * always-mapped memory. Base must be 32MB aligned and size a multiple of 32MB.
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index b7eb3a6e08..923a90925c 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -326,12 +326,6 @@ void __init setup_pagetables(unsigned long 
boot_phys_offset)
 #endif
 }
-/* MMU setup for secondary CPUS (which already have paging enabled) */
-void mmu_init_secondary_cpu(void)
-{
-    xen_pt_enforce_wnx();
-}
-
 #ifdef CONFIG_ARM_32
 /*
  * Set up the direct-mapped xenheap:
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index ec76de3cac..beb137d06e 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -361,8 +361,6 @@ void start_secondary(void)
      */
     update_system_features(&current_cpu_data);
-    mmu_init_secondary_cpu();
-
     gic_init_secondary_cpu();
     set_current(idle_vcpu[cpuid]);
Kind regards,
Henry
 
 
 | 
|  | Lists.xenproject.org is hosted with RackSpace, monitoring our |