[Xen-devel] [PATCH v5 6/6] arm64: remove the rest of asm-uaccess.h



The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable macros
are the last two macros defined in asm-uaccess.h.

For now, move them to entry.S, where they are used. Eventually,
these macros should be replaced with C wrappers to reduce the
maintenance burden.

Also, once these macros are unified with their C counterparts, it
would be a good idea to check that PAN is in the correct state on
every enable/disable call.
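
To make that concrete, here is a minimal sketch of what such a checked
C wrapper could look like. It is purely illustrative and not part of
this patch: the wrapper name uaccess_ttbr0_enable_checked() is made up,
and it assumes CONFIG_ARM64_SW_TTBR0_PAN together with the existing
uaccess_ttbr0_enable() helper and the thread_info::ttbr0 field used by
SW PAN.

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void uaccess_ttbr0_enable_checked(void)
{
        uaccess_ttbr0_enable();
        /*
         * With SW PAN, "user access enabled" means TTBR0_EL1 points at
         * the task's saved page table rather than the reserved one, so
         * a mismatch here would indicate a broken enable/disable pairing.
         */
        WARN_ON_ONCE(system_uses_ttbr0_pan() &&
                     read_sysreg(ttbr0_el1) != current_thread_info()->ttbr0);
}
#endif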

Signed-off-by: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
---
 arch/arm64/include/asm/asm-uaccess.h | 39 ----------------------------
 arch/arm64/kernel/entry.S            | 27 ++++++++++++++++++-
 arch/arm64/lib/clear_user.S          |  2 +-
 arch/arm64/lib/copy_from_user.S      |  2 +-
 arch/arm64/lib/copy_in_user.S        |  2 +-
 arch/arm64/lib/copy_to_user.S        |  2 +-
 arch/arm64/mm/cache.S                |  1 -
 7 files changed, 30 insertions(+), 45 deletions(-)
 delete mode 100644 arch/arm64/include/asm/asm-uaccess.h

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
deleted file mode 100644
index fba2a69f7fef..000000000000
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ASM_UACCESS_H
-#define __ASM_ASM_UACCESS_H
-
-#include <asm/alternative.h>
-#include <asm/kernel-pgtable.h>
-#include <asm/mmu.h>
-#include <asm/sysreg.h>
-#include <asm/assembler.h>
-
-/*
- * User access enabling/disabling macros.
- */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-       .macro  __uaccess_ttbr0_disable, tmp1
-       mrs     \tmp1, ttbr1_el1                        // swapper_pg_dir
-       bic     \tmp1, \tmp1, #TTBR_ASID_MASK
-       sub     \tmp1, \tmp1, #RESERVED_TTBR0_SIZE      // reserved_ttbr0 just before swapper_pg_dir
-       msr     ttbr0_el1, \tmp1                        // set reserved TTBR0_EL1
-       isb
-       add     \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
-       msr     ttbr1_el1, \tmp1                // set reserved ASID
-       isb
-       .endm
-
-       .macro  __uaccess_ttbr0_enable, tmp1, tmp2
-       get_current_task \tmp1
-       ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
-       mrs     \tmp2, ttbr1_el1
-       extr    \tmp2, \tmp2, \tmp1, #48
-       ror     \tmp2, \tmp2, #16
-       msr     ttbr1_el1, \tmp2                // set the active ASID
-       isb
-       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
-       isb
-       .endm
-#endif
-
-#endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7c6a0a41676f..cc6c0dbb7734 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -22,8 +22,8 @@
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
-#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -144,6 +144,31 @@ alternative_cb_end
 #endif
        .endm
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       .macro  __uaccess_ttbr0_disable, tmp1
+       mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
+       bic     \tmp1, \tmp1, #TTBR_ASID_MASK
+       sub     \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
+       add     \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
+       msr     ttbr1_el1, \tmp1                // set reserved ASID
+       isb
+       .endm
+
+       .macro  __uaccess_ttbr0_enable, tmp1, tmp2
+       get_current_task \tmp1
+       ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
+       mrs     \tmp2, ttbr1_el1
+       extr    \tmp2, \tmp2, \tmp1, #48
+       ror     \tmp2, \tmp2, #16
+       msr     ttbr1_el1, \tmp2                // set the active ASID
+       isb
+       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
+       isb
+       .endm
+#endif
+
        .macro  kernel_entry, el, regsize = 64
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index aeafc03e961a..b0b4a86a09e2 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -6,7 +6,7 @@
  */
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 
        .text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index ebb3c06cbb5d..142bc7505518 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -5,7 +5,7 @@
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 3d8153a1ebce..04dc48ca26f7 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -7,7 +7,7 @@
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 357eae2c18eb..8f3218ae88ab 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -5,7 +5,7 @@
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 0093bb9fcd12..627be857b8d0 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -12,7 +12,6 @@
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
-#include <asm/asm-uaccess.h>
 
 /*
  *     __asm_flush_cache_user_range(start,end)
-- 
2.17.1

