[PATCH 61/65] x86/entry: Make syscall/sysenter entrypoints CET-IBT compatible
Each of MSR_{L,C}STAR and MSR_SYSENTER_EIP needs to land on an endbr64
instruction.

For sysenter, this is easy.  Unfortunately for syscall, the stubs are already
29 bytes long with a limit of 32, and endbr64 is 4 bytes.  Luckily, there is a
1-byte instruction which can be moved from the stubs into the main handlers.

Move the push %rax out of the stub and into {l,c}star_enter(), allowing room
for the endbr64 instruction when appropriate.  Update the comment describing
the entry state.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
---
 xen/arch/x86/x86_64/entry.S | 18 +++++++++---------
 xen/arch/x86/x86_64/traps.c | 13 +++++++++----
 2 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 8494b97a54a2..9abcf95bd010 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -241,18 +241,17 @@ iret_exit_to_guest:
  * When entering SYSCALL from user mode:
  *      Vector directly to the registered arch.syscall_addr.
  *
- * Initial work is done by per-CPU trampolines. At this point %rsp has been
- * initialised to point at the correct Xen stack, %rsp has been saved, and
- * %rax needs to be restored from the %ss save slot. All other registers are
- * still to be saved onto the stack, starting with RFLAGS, and an appropriate
- * %ss must be saved into the space left by the trampoline.
+ * Initial work is done by per-CPU trampolines.
+ *  - Guest %rax stored in the %ss slot
+ *  - Guest %rsp stored in %rax
+ *  - Xen stack loaded, pointing at the %ss slot
  */
 ENTRY(lstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
-        /* sti could live here when we don't switch page tables below. */
-        movq  8(%rsp),%rax /* Restore %rax. */
+        push  %rax          /* Guest %rsp */
+        movq  8(%rsp), %rax /* Restore guest %rax */
         movq  $FLAT_KERNEL_SS,8(%rsp)
         pushq %r11
         pushq $FLAT_KERNEL_CS64
@@ -288,9 +287,9 @@ ENTRY(cstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
-        /* sti could live here when we don't switch page tables below. */
+        push  %rax          /* Guest %rsp */
         CR4_PV32_RESTORE
-        movq  8(%rsp), %rax /* Restore %rax. */
+        movq  8(%rsp), %rax /* Restore guest %rax. */
         movq  $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain.  Compat handled lower. */
         pushq %r11
         pushq $FLAT_USER_CS32
@@ -323,6 +322,7 @@ ENTRY(cstar_enter)
         jmp   switch_to_kernel
 
 ENTRY(sysenter_entry)
+        ENDBR64
#ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 6f3c65bedc7a..3b9a332efb99 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -295,6 +295,15 @@ static unsigned int write_stub_trampoline(
 {
     unsigned char *p = stub;
 
+    if ( cpu_has_xen_ibt )
+    {
+        /* endbr64 */
+        *p++ = 0xf3;
+        *p++ = 0x0f;
+        *p++ = 0x1e;
+        *p++ = 0xfa;
+    }
+
     /* Store guest %rax into %ss slot */
     /* movabsq %rax, stack_bottom - 8 */
     *p++ = 0x48;
@@ -315,10 +324,6 @@ static unsigned int write_stub_trampoline(
     *(uint64_t *)p = stack_bottom - 8;
     p += 8;
 
-    /* Store guest %rsp into %rsp slot */
-    /* pushq %rax */
-    *p++ = 0x50;
-
     /* jmp target_va */
     *p++ = 0xe9;
     *(int32_t *)p = target_va - (stub_va + (p - stub) + 4);
-- 
2.11.0
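For reference, the byte budget described above works out as in the sketch
below.  This is only an illustration of what the per-CPU SYSCALL stub emits
after the change: the endbr64, the movabs store, the dropped push and the
rel32 jmp appear in the hunks, while the two middle instructions and their
encodings are assumed from the trampoline behaviour documented in the new
lstar_enter comment (symbolic operands are illustrative).

        endbr64                           /* 4 bytes  - only when cpu_has_xen_ibt        */
        movabs %rax, (stack_bottom - 8)   /* 10 bytes - guest %rax into the %ss slot     */
        mov    %rsp, %rax                 /* 3 bytes  - guest %rsp into %rax (assumed)   */
        movabs $(stack_bottom - 8), %rsp  /* 10 bytes - load the Xen stack (assumed)     */
        jmp    lstar_enter                /* 5 bytes  - tail into the main handler       */

Previously the stub was 10 + 3 + 10 + 1 + 5 = 29 bytes; adding endbr64 would
overflow the 32-byte limit, but with the 1-byte push %rax moved into
{l,c}star_enter() the IBT-enabled stub comes to exactly 32 bytes.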