|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 3/7] x86: Temporarily disable SMAP to legally access user pages in kernel mode
Use STAC/CLAC to temporarily disable SMAP to allow legal accesses to
user pages in kernel mode
Signed-off-by: Feng Wu <feng.wu@xxxxxxxxx>
---
xen/arch/x86/domain_build.c | 3 +++
xen/arch/x86/usercopy.c | 6 ++++++
xen/arch/x86/x86_64/compat/entry.S | 2 ++
xen/arch/x86/x86_64/entry.S | 4 ++++
xen/include/asm-x86/uaccess.h | 4 ++++
xen/include/asm-x86/x86_64/system.h | 2 ++
6 files changed, 21 insertions(+)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 84ce392..15af110 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -778,6 +778,7 @@ int __init construct_dom0(
}
bootstrap_map(NULL);
+ stac();
if ( UNSET_ADDR != parms.virt_hypercall )
{
if ( (parms.virt_hypercall < v_start) ||
@@ -787,6 +788,7 @@ int __init construct_dom0(
write_ptbase(current);
printk("Invalid HYPERCALL_PAGE field in ELF notes.\n");
rc = -1;
+ clac();
goto out;
}
hypercall_page_initialise(
@@ -1150,6 +1152,7 @@ int __init construct_dom0(
elf_check_broken(&elf));
iommu_dom0_init(dom0);
+ clac();
return 0;
out:
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
index b79202b..946e40f 100644
--- a/xen/arch/x86/usercopy.c
+++ b/xen/arch/x86/usercopy.c
@@ -15,6 +15,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void
*from, unsigned n)
unsigned long __d0, __d1, __d2, __n = n;
asm volatile (
+ ASM_STAC(%%)"\n"
" cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
" jbe 1f\n"
" mov %1,%0\n"
@@ -30,6 +31,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void
*from, unsigned n)
" mov %3,%0\n"
"1: rep movsb\n" /* ...remainder copied as bytes */
"2:\n"
+ ASM_CLAC(%%)"\n"
".section .fixup,\"ax\"\n"
"5: add %3,%0\n"
" jmp 2b\n"
@@ -52,6 +54,7 @@ __copy_from_user_ll(void *to, const void __user *from,
unsigned n)
unsigned long __d0, __d1, __d2, __n = n;
asm volatile (
+ ASM_STAC(%%)"\n"
" cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
" jbe 1f\n"
" mov %1,%0\n"
@@ -67,6 +70,7 @@ __copy_from_user_ll(void *to, const void __user *from,
unsigned n)
" mov %3,%0\n"
"1: rep; movsb\n" /* ...remainder copied as bytes */
"2:\n"
+ ASM_CLAC(%%)"\n"
".section .fixup,\"ax\"\n"
"5: add %3,%0\n"
" jmp 6f\n"
@@ -114,10 +118,12 @@ copy_to_user(void __user *to, const void *from, unsigned
n)
do { \
long __d0; \
__asm__ __volatile__( \
+ ASM_STAC(%%)"\n" \
"0: rep; stosl\n" \
" movl %2,%0\n" \
"1: rep; stosb\n" \
"2:\n" \
+ ASM_CLAC(%%)"\n" \
".section .fixup,\"ax\"\n" \
"3: lea 0(%2,%0,4),%0\n" \
" jmp 2b\n" \
diff --git a/xen/arch/x86/x86_64/compat/entry.S
b/xen/arch/x86/x86_64/compat/entry.S
index ac594c9..298c1a9 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -266,6 +266,7 @@ ENTRY(compat_int80_direct_trap)
/* On return only %rbx and %rdx are guaranteed non-clobbered. */
compat_create_bounce_frame:
ASSERT_INTERRUPTS_ENABLED
+ ASM_STAC
mov %fs,%edi
testb $2,UREGS_cs+8(%rsp)
jz 1f
@@ -337,6 +338,7 @@ __UNLIKELY_END(compat_bounce_null_selector)
movl %eax,UREGS_cs+8(%rsp)
movl TRAPBOUNCE_eip(%rdx),%eax
movl %eax,UREGS_rip+8(%rsp)
+ ASM_CLAC
ret
.section .fixup,"ax"
.Lfx13:
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index d294064..e49f9c4 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -380,6 +380,7 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
movb TRAPBOUNCE_flags(%rdx),%cl
subq $40,%rsi
movq UREGS_ss+8(%rsp),%rax
+ ASM_STAC
.Lft2: movq %rax,32(%rsi) # SS
movq UREGS_rsp+8(%rsp),%rax
.Lft3: movq %rax,24(%rsi) # RSP
@@ -437,9 +438,11 @@ UNLIKELY_END(bounce_failsafe)
testq %rax,%rax
UNLIKELY_START(z, create_bounce_frame_bad_bounce_ip)
lea
UNLIKELY_DISPATCH_LABEL(create_bounce_frame_bad_bounce_ip)(%rip), %rdi
+ ASM_CLAC
jmp asm_domain_crash_synchronous /* Does not return */
__UNLIKELY_END(create_bounce_frame_bad_bounce_ip)
movq %rax,UREGS_rip+8(%rsp)
+ ASM_CLAC
ret
_ASM_EXTABLE(.Lft2, dom_crash_sync_extable)
_ASM_EXTABLE(.Lft3, dom_crash_sync_extable)
@@ -466,6 +469,7 @@ ENTRY(dom_crash_sync_extable)
leal (%rax,%rax,2),%eax
orb %al,UREGS_cs(%rsp)
xorl %edi,%edi
+ ASM_CLAC
jmp asm_domain_crash_synchronous /* Does not return */
/* No special register assumptions. */
diff --git a/xen/include/asm-x86/uaccess.h b/xen/include/asm-x86/uaccess.h
index 88b4ba2..9cb72fa 100644
--- a/xen/include/asm-x86/uaccess.h
+++ b/xen/include/asm-x86/uaccess.h
@@ -147,8 +147,10 @@ struct __large_struct { unsigned long buf[100]; };
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
__asm__ __volatile__( \
+ ASM_STAC(%%)"\n" \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
+ ASM_CLAC(%%)"\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
@@ -159,8 +161,10 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
__asm__ __volatile__( \
+ ASM_STAC(%%)"\n" \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
+ ASM_CLAC(%%)"\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
diff --git a/xen/include/asm-x86/x86_64/system.h
b/xen/include/asm-x86/x86_64/system.h
index 20f038b..1dde8b9 100644
--- a/xen/include/asm-x86/x86_64/system.h
+++ b/xen/include/asm-x86/x86_64/system.h
@@ -13,8 +13,10 @@
*/
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
asm volatile ( \
+ ASM_STAC(%%)"\n" \
"1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
"2:\n" \
+ ASM_CLAC(%%)"\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \
" jmp 2b\n" \
--
1.8.3.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |