[xen staging] x86/entry: Adjust guest paths to be shadow stack compatible
commit 43b98e71903845c727028d2e84b90e5905ff8d66
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Apr 24 14:34:44 2020 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Fri May 29 23:09:46 2020 +0100

    x86/entry: Adjust guest paths to be shadow stack compatible

    The SYSCALL/SYSENTER/SYSRET paths need to use {SET,CLR}SSBSY.  The IRET
    to guest paths must not.

    In the SYSRET path, re-position the mov which loads rip into %rcx so we
    can use %rcx for CLRSSBSY, rather than spilling another register to the
    stack.

    While we can in principle detect shadow stack corruption and a failure
    to clear the supervisor token busy bit in the SYSRET path (by
    inspecting the carry flag following CLRSSBSY), we cannot detect similar
    problems for the IRET path (IRET is specified not to fault in this
    case).  We will double fault at some point later, when next trying to
    enter Xen, due to an already-set supervisor shadow stack busy bit.  As
    SYSRET is an uncommon path anyway, avoid the added complexity for no
    appreciable gain.

    The IST switch onto the primary stack is not great, as we have an
    instruction boundary with no shadow stack.  This is the least bad
    option available.

    These paths are not used before shadow stacks are properly established,
    so can use alternatives to avoid extra runtime CET detection logic.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/x86_64/compat/entry.S |  1 +
 xen/arch/x86/x86_64/entry.S        | 36 ++++++++++++++++++++++++++++++++++--
 2 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3cd375bd48..2ca81341a4 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -198,6 +198,7 @@ ENTRY(cr4_pv32_restore)
 
 /* See lstar_enter for entry register state. */
 ENTRY(cstar_enter)
+        ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
         /* sti could live here when we don't switch page tables below. */
         CR4_PV32_RESTORE
         movq  8(%rsp),%rax            /* Restore %rax. */
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index e68d680791..6470266d2f 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -191,9 +191,16 @@ restore_all_guest:
         sarq  $47,%rcx
         incl  %ecx
         cmpl  $1,%ecx
-        movq  8(%rsp),%rcx            # RIP
-        ja    iret_exit_to_guest
+        ja    .Lrestore_rcx_iret_exit_to_guest
 
+        /* Clear the supervisor shadow stack token busy bit. */
+.macro rag_clrssbsy
+        rdsspq %rcx
+        clrssbsy (%rcx)
+.endm
+        ALTERNATIVE "", rag_clrssbsy, X86_FEATURE_XEN_SHSTK
+
+        movq  8(%rsp), %rcx           # RIP
         cmpw  $FLAT_USER_CS32,16(%rsp)# CS
         movq  32(%rsp),%rsp           # RSP
         je    1f
@@ -201,6 +208,8 @@ restore_all_guest:
 1:      sysretl
 
         ALIGN
+.Lrestore_rcx_iret_exit_to_guest:
+        movq  8(%rsp), %rcx           # RIP
 /* No special register assumptions. */
 iret_exit_to_guest:
         andl  $~(X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_VM),24(%rsp)
@@ -226,6 +235,7 @@ iret_exit_to_guest:
  * %ss must be saved into the space left by the trampoline.
  */
 ENTRY(lstar_enter)
+        ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
         /* sti could live here when we don't switch page tables below. */
         movq  8(%rsp),%rax            /* Restore %rax. */
         movq  $FLAT_KERNEL_SS,8(%rsp)
@@ -259,6 +269,7 @@ ENTRY(lstar_enter)
         jmp   test_all_events
 
 ENTRY(sysenter_entry)
+        ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
         /* sti could live here when we don't switch page tables below. */
         pushq $FLAT_USER_SS
         pushq $0
@@ -877,6 +888,27 @@ handle_ist_exception:
         movl  $UREGS_kernel_sizeof/8,%ecx
         movq  %rdi,%rsp
         rep movsq
+
+        /* Switch Shadow Stacks */
+.macro ist_switch_shstk
+        rdsspq %rdi
+        clrssbsy (%rdi)
+        /*
+         * Switching supervisor shadow stacks is especially hard, as
+         * supervisor and restore tokens are incompatible.
+         *
+         * For now, we only need to switch onto an unused primary shadow
+         * stack, so use SETSSBSY for the purpose, exactly like the
+         * SYSCALL/SYSENTER entry.
+         *
+         * Ideally, we'd want to CLRSSBSY after switching stacks, but that
+         * will leave SSP zeroed, so it is not an option.  Instead, we
+         * transiently have a zero SSP on this instruction boundary, and
+         * depend on IST for NMI/#MC protection.
+         */
+        setssbsy
+.endm
+        ALTERNATIVE "", ist_switch_shstk, X86_FEATURE_XEN_SHSTK
 1:
 #else
         ASSERT_CONTEXT_IS_XEN
--
generated by git-patchbot for /home/xen/git/xen.git#staging
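
As context for the paragraph about the omitted SYSRET-path check: detecting
the failure "by inspecting the carry flag following CLRSSBSY" would only take
one extra branch after the rag_clrssbsy sequence above. A minimal sketch, not
part of the committed patch; the .Lshstk_corrupt label and whatever handling
sits behind it are hypothetical:

        rdsspq   %rcx            /* %rcx = SSP, which at this point points */
                                 /* at the supervisor shadow stack token.  */
        clrssbsy (%rcx)          /* Clear the token's busy bit.  CF is set */
                                 /* if no valid busy token was found, and  */
                                 /* SSP is zeroed either way.              */
        jc       .Lshstk_corrupt /* Hypothetical: report the corruption.   */

As the commit message concludes, SYSRET is an uncommon path, so the branch
and its handling code were judged not worth the complexity.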
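The entry side is the mirror image. SETSSBSY validates the supervisor shadow
stack token addressed by the IA32_PL0_SSP MSR (the token's value is the
linear address of its own slot, with bit 0 serving as the busy bit), sets the
busy bit, and loads SSP from it; it faults with #CP(setssbsy) if the token is
already marked busy. That is one way a busy bit left set by a missed
CLRSSBSY surfaces on a later entry. An annotated copy of the entry-path
hunks above, with the token behaviour taken from the Intel CET specification
rather than from this patch:

        /*
         * Boot-time patched: NOPs until X86_FEATURE_XEN_SHSTK is
         * established, setssbsy thereafter.  SETSSBSY faults with
         * #CP(setssbsy) if the supervisor token's busy bit is already
         * set, e.g. following a missed CLRSSBSY on a previous exit.
         */
        ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK

This boot-time patching is also what the final paragraph of the commit
message means by using alternatives to avoid extra runtime CET detection
logic.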