[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 4/5] x86/kernel: Move page table macros to header


  • To: Juergen Gross <jgross@xxxxxxxx>, Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>, Thomas Gleixner <tglx@xxxxxxxxxxxxx>, "Ingo Molnar" <mingo@xxxxxxxxxx>, Borislav Petkov <bp@xxxxxxxxx>, Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>, <x86@xxxxxxxxxx>, "H. Peter Anvin" <hpa@xxxxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, "Oleksandr Tyshchenko" <oleksandr_tyshchenko@xxxxxxxx>, Paolo Bonzini <pbonzini@xxxxxxxxxx>, Brian Gerst <brgerst@xxxxxxxxx>
  • From: Jason Andryuk <jason.andryuk@xxxxxxx>
  • Date: Fri, 23 Aug 2024 15:36:29 -0400
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass (sender ip is 165.204.84.17) smtp.rcpttodomain=suse.com smtp.mailfrom=amd.com; dmarc=pass (p=quarantine sp=quarantine pct=100) action=none header.from=amd.com; dkim=none (message not signed); arc=none (0)
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=3WNcTBJWFqlSsl7h7qLZb7kjFufC1sLILMFbSKVwKI0=; b=sFMohX8S/ImnJ3sw6spbBG4PjYY8gVTWZrsVJyAa0ajT7qbLecV1/OItoQ2OoFFFlfGO3Gow+D5YfD2fQcBSIYyWejKfyNAKA8G/FksM2TpTY6jiw16x8MksEqqVw5MKsno4yOgzmGZMBFdAVMwej3x/D4wB8EobM4quR+6lwdeUsVhb/dEDQFMcjKSt2k/n2pb9NpHSJXj/cQTvXVwqKMlLY7WrQ3ZrgDRkVWIcQSwcR9NXozdyIyoFPtAN+768ASQpuQvx8X/LaO9gZYPa2fzqUvHVYIqbrBOfdQhAhZgCZSwMJW8vPrEEgFaaJ2xdPJX4t5uUUnsCDIwZLmBlkw==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=B3sMVq8VN0eDPtNrm6Y5209AmqJDoEaZnWPVYtlW0agBUGgv6vTxEW5RPrn+G4JtOaFvoyjLQ+QBvZSuTnYE3bXJgKs40/v5g12sbZX1VniIG9Z/eylVwOI9YJ11aHdObwx4uLrtjVDfuaIXR0e6p+XvWtzTT1zu1Dl6iFxQLNLQ0KZgE3RluBWK8NEdn5DRXT7SlDzWqhat/im87jmInZmLPZbitUmZURrhOWeOni0Dwhv5wDE/PU2Oj2a2yo+pSPf18O6Fwy4t5a3B0Bn7BWTR+bdG11kM3sZRTA+MySklJXENQssqIs2vcj77bTJa/fBlMZQhgPeH6jM6ZroqCQ==
  • Cc: <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <linux-kernel@xxxxxxxxxxxxxxx>, "Jason Andryuk" <jason.andryuk@xxxxxxx>
  • Delivery-date: Fri, 23 Aug 2024 19:36:50 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

The PVH entry point will need an additional set of prebuilt page tables.
Move the macros and defines to pgtable_64.h, so they can be re-used.

Signed-off-by: Jason Andryuk <jason.andryuk@xxxxxxx>
Reviewed-by: Juergen Gross <jgross@xxxxxxxx>
---
v3:
Add Juergen's R-b

v2:
Use existing pgtable_64.h
s/-/ - / in pud_index()
Did not add Juergen's R-b in case the header movement matters
---
 arch/x86/include/asm/pgtable_64.h | 23 ++++++++++++++++++++++-
 arch/x86/kernel/head_64.S         | 20 --------------------
 2 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c4407271d08..72912b8edfdf 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -271,5 +271,26 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 
 #include <asm/pgtable-invert.h>
 
-#endif /* !__ASSEMBLY__ */
+#else /* __ASSEMBLY__ */
+
+#define l4_index(x)    (((x) >> 39) & 511)
+#define pud_index(x)   (((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
+L4_START_KERNEL = l4_index(__START_KERNEL_map)
+
+L3_START_KERNEL = pud_index(__START_KERNEL_map)
+
+#define SYM_DATA_START_PAGE_ALIGNED(name)                      \
+       SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
+
+/* Automate the creation of 1 to 1 mapping pmd entries */
+#define PMDS(START, PERM, COUNT)                       \
+       i = 0 ;                                         \
+       .rept (COUNT) ;                                 \
+       .quad   (START) + (i << PMD_SHIFT) + (PERM) ;   \
+       i = i + 1 ;                                     \
+       .endr
+
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 330922b328bf..16752b8dfa89 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -32,13 +32,6 @@
  * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
  * because we need identity-mapped pages.
  */
-#define l4_index(x)    (((x) >> 39) & 511)
-#define pud_index(x)   (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-
-L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
-L4_START_KERNEL = l4_index(__START_KERNEL_map)
-
-L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
        __HEAD
        .code64
@@ -577,9 +570,6 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
 SYM_CODE_END(vc_no_ghcb)
 #endif
 
-#define SYM_DATA_START_PAGE_ALIGNED(name)                      \
-       SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
-
 #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
 /*
  * Each PGD needs to be 8k long and 8k aligned.  We do not
@@ -601,14 +591,6 @@ SYM_CODE_END(vc_no_ghcb)
 #define PTI_USER_PGD_FILL      0
 #endif
 
-/* Automate the creation of 1 to 1 mapping pmd entries */
-#define PMDS(START, PERM, COUNT)                       \
-       i = 0 ;                                         \
-       .rept (COUNT) ;                                 \
-       .quad   (START) + (i << PMD_SHIFT) + (PERM) ;   \
-       i = i + 1 ;                                     \
-       .endr
-
        __INITDATA
        .balign 4
 
@@ -708,8 +690,6 @@ SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
        .endr
 SYM_DATA_END(level1_fixmap_pgt)
 
-#undef PMDS
-
        .data
        .align 16
 
-- 
2.34.1




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.