[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen stable-4.18] x86/thunk: Build Xen with Return Thunks
commit cf4c900be3852d08081603c15a03b34122d3426a Author: Jan Beulich <jbeulich@xxxxxxxx> AuthorDate: Mon Apr 7 17:15:17 2025 +0200 Commit: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> CommitDate: Mon May 12 17:30:56 2025 +0100 x86/thunk: Build Xen with Return Thunks The Indirect Target Selection speculative vulnerability means that indirect branches (including RETs) are unsafe when in the first half of a cacheline. In order to mitigate this, build with return thunks and arrange for __x86_return_thunk to be (mis)aligned in the same manner as __x86_indirect_thunk_* so the RET instruction is placed in a safe location. place_ret() needs to conditionally emit JMP __x86_return_thunk instead of RET. This is part of XSA-469 / CVE-2024-28956 Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx> Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx> (cherry picked from commit afcb4a06c740f7f71d2e9746c9d147c38a6e6c90) --- xen/arch/x86/Kconfig | 5 +++++ xen/arch/x86/Makefile | 1 + xen/arch/x86/acpi/wakeup_prot.S | 2 +- xen/arch/x86/alternative.c | 31 ++++++++++++++++++++++++++++++- xen/arch/x86/arch.mk | 3 +++ xen/arch/x86/bhb-thunk.S | 2 +- xen/arch/x86/clear_page.S | 3 ++- xen/arch/x86/copy_page.S | 3 ++- xen/arch/x86/efi/check.c | 3 +++ xen/arch/x86/include/asm/asm-defns.h | 6 ++++++ xen/arch/x86/indirect-thunk.S | 27 +++++++++++++++++++++++++++ xen/arch/x86/pv/emul-priv-op.c | 2 +- xen/arch/x86/pv/gpr_switch.S | 5 ++--- xen/arch/x86/spec_ctrl.c | 3 +++ xen/arch/x86/x86_64/compat/entry.S | 6 +++--- xen/arch/x86/x86_64/entry.S | 2 +- xen/arch/x86/xen.lds.S | 1 + xen/common/Kconfig | 11 +++++++++++ 18 files changed, 103 insertions(+), 13 deletions(-) diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig index 1acdffc51c..9611b80761 100644 --- a/xen/arch/x86/Kconfig +++ b/xen/arch/x86/Kconfig @@ -35,9 +35,14 @@ config ARCH_DEFCONFIG default "arch/x86/configs/x86_64_defconfig" config CC_HAS_INDIRECT_THUNK + # GCC >= 8 or Clang >= 6 
def_bool $(cc-option,-mindirect-branch-register) || \ $(cc-option,-mretpoline-external-thunk) +config CC_HAS_RETURN_THUNK + # GCC >= 8 or Clang >= 15 + def_bool $(cc-option,-mfunction-return=thunk-extern) + config HAS_AS_CET_SS # binutils >= 2.29 or LLVM >= 6 def_bool $(as-instr,wrssq %rax$(comma)0;setssbsy) diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile index 7df877a09d..fc984d629e 100644 --- a/xen/arch/x86/Makefile +++ b/xen/arch/x86/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o obj-y += msi.o obj-y += msr.o obj-$(CONFIG_INDIRECT_THUNK) += indirect-thunk.o +obj-$(CONFIG_RETURN_THUNK) += indirect-thunk.o obj-$(CONFIG_PV) += ioport_emulate.o obj-y += irq.o obj-$(CONFIG_KEXEC) += machine_kexec.o diff --git a/xen/arch/x86/acpi/wakeup_prot.S b/xen/arch/x86/acpi/wakeup_prot.S index 66f7993399..97bd676aae 100644 --- a/xen/arch/x86/acpi/wakeup_prot.S +++ b/xen/arch/x86/acpi/wakeup_prot.S @@ -133,7 +133,7 @@ ENTRY(s3_resume) pop %r12 pop %rbx pop %rbp - ret + RET .data .align 16 diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c index be19577234..ccb9985a76 100644 --- a/xen/arch/x86/alternative.c +++ b/xen/arch/x86/alternative.c @@ -137,16 +137,45 @@ void init_or_livepatch add_nops(void *insns, unsigned int len) } } +void nocall __x86_return_thunk(void); + /* * Place a return at @ptr. @ptr must be in the writable alias of a stub. * + * When CONFIG_RETURN_THUNK is active, this may be a JMP __x86_return_thunk + * instead, depending on the safety of @ptr with respect to Indirect Target + * Selection. + * * Returns the next position to write into the stub. */ void *place_ret(void *ptr) { + unsigned long addr = (unsigned long)ptr; uint8_t *p = ptr; - *p++ = 0xc3; + /* + * When Return Thunks are used, if a RET would be unsafe at this location + * with respect to Indirect Target Selection (i.e. if addr is in the first + * half of a cacheline), insert a JMP __x86_return_thunk instead. 
+ * + * The displacement needs to be relative to the executable alias of the + * stub, not to @ptr which is the writeable alias. + */ + if ( IS_ENABLED(CONFIG_RETURN_THUNK) && !(addr & 0x20) ) + { + long stub_va = (this_cpu(stubs.addr) & PAGE_MASK) + (addr & ~PAGE_MASK); + long disp = (long)__x86_return_thunk - (stub_va + 5); + + BUG_ON((int32_t)disp != disp); + + *p++ = 0xe9; + *(int32_t *)p = disp; + p += 4; + } + else + { + *p++ = 0xc3; + } return p; } diff --git a/xen/arch/x86/arch.mk b/xen/arch/x86/arch.mk index 28217c9ace..21a753b639 100644 --- a/xen/arch/x86/arch.mk +++ b/xen/arch/x86/arch.mk @@ -46,6 +46,9 @@ CFLAGS-$(CONFIG_CC_IS_GCC) += -fno-jump-tables CFLAGS-$(CONFIG_CC_IS_CLANG) += -mretpoline-external-thunk endif +# Compile with return thunk support if selected. +CFLAGS-$(CONFIG_RETURN_THUNK) += -mfunction-return=thunk-extern + # Disable the addition of a .note.gnu.property section to object files when # livepatch support is enabled. The contents of that section can change # depending on the instructions used, and livepatch-build-tools doesn't know diff --git a/xen/arch/x86/bhb-thunk.S b/xen/arch/x86/bhb-thunk.S index 05f1043df7..472da481dd 100644 --- a/xen/arch/x86/bhb-thunk.S +++ b/xen/arch/x86/bhb-thunk.S @@ -23,7 +23,7 @@ ENTRY(clear_bhb_tsx) 0: .byte 0xc6, 0xf8, 0 /* xabort $0 */ int3 1: - ret + RET .size clear_bhb_tsx, . - clear_bhb_tsx .type clear_bhb_tsx, @function diff --git a/xen/arch/x86/clear_page.S b/xen/arch/x86/clear_page.S index 5b5622cc52..fd4c8c0b2a 100644 --- a/xen/arch/x86/clear_page.S +++ b/xen/arch/x86/clear_page.S @@ -1,5 +1,6 @@ .file __FILE__ +#include <asm/asm_defns.h> #include <asm/page.h> ENTRY(clear_page_sse2) @@ -15,7 +16,7 @@ ENTRY(clear_page_sse2) jnz 0b sfence - ret + RET .type clear_page_sse2, @function .size clear_page_sse2, . 
- clear_page_sse2 diff --git a/xen/arch/x86/copy_page.S b/xen/arch/x86/copy_page.S index ddb6e0ebbb..184127c11d 100644 --- a/xen/arch/x86/copy_page.S +++ b/xen/arch/x86/copy_page.S @@ -1,5 +1,6 @@ .file __FILE__ +#include <asm/asm_defns.h> #include <asm/page.h> #define src_reg %rsi @@ -40,7 +41,7 @@ ENTRY(copy_page_sse2) movnti tmp4_reg, 3*WORD_SIZE(dst_reg) sfence - ret + RET .type copy_page_sse2, @function .size copy_page_sse2, . - copy_page_sse2 diff --git a/xen/arch/x86/efi/check.c b/xen/arch/x86/efi/check.c index 9e473faad3..23ba30abf3 100644 --- a/xen/arch/x86/efi/check.c +++ b/xen/arch/x86/efi/check.c @@ -3,6 +3,9 @@ int __attribute__((__ms_abi__)) test(int i) return i; } +/* In case -mfunction-return is in use. */ +void __x86_return_thunk(void) {}; + /* * Populate an array with "addresses" of relocatable and absolute values. * This is to probe ld for (a) emitting base relocations at all and (b) not diff --git a/xen/arch/x86/include/asm/asm-defns.h b/xen/arch/x86/include/asm/asm-defns.h index 32d6b44910..97ebe21298 100644 --- a/xen/arch/x86/include/asm/asm-defns.h +++ b/xen/arch/x86/include/asm/asm-defns.h @@ -58,6 +58,12 @@ .endif .endm +#ifdef CONFIG_RETURN_THUNK +# define RET jmp __x86_return_thunk +#else +# define RET ret +#endif + #ifdef CONFIG_XEN_IBT # define ENDBR64 endbr64 #else diff --git a/xen/arch/x86/indirect-thunk.S b/xen/arch/x86/indirect-thunk.S index e7ef104d3b..239cf7dc77 100644 --- a/xen/arch/x86/indirect-thunk.S +++ b/xen/arch/x86/indirect-thunk.S @@ -11,6 +11,9 @@ #include <asm/asm_defns.h> + +#ifdef CONFIG_INDIRECT_THUNK + .macro IND_THUNK_RETPOLINE reg:req call 1f int3 @@ -60,3 +63,27 @@ ENTRY(__x86_indirect_thunk_\reg) .irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15 GEN_INDIRECT_THUNK reg=r\reg .endr + +#endif /* CONFIG_INDIRECT_THUNK */ + +#ifdef CONFIG_RETURN_THUNK + .section .text.entry.__x86_return_thunk, "ax", @progbits + + /* + * The Indirect Target Selection speculative vulnerability means that + * indirect 
branches (including RETs) are unsafe when in the first + * half of a cacheline. Arrange for them to be in the second half. + * + * Align to 64, then skip 32. + */ + .balign 64 + .fill 32, 1, 0xcc + +ENTRY(__x86_return_thunk) + ret + int3 /* Halt straight-line speculation */ + + .size __x86_return_thunk, . - __x86_return_thunk + .type __x86_return_thunk, @function + +#endif /* CONFIG_RETURN_THUNK */ diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c index ff5d1c9f86..295d847ea2 100644 --- a/xen/arch/x86/pv/emul-priv-op.c +++ b/xen/arch/x86/pv/emul-priv-op.c @@ -131,7 +131,7 @@ static io_emul_stub_t *io_emul_stub_setup(struct priv_op_ctxt *ctxt, u8 opcode, BUILD_BUG_ON(STUB_BUF_SIZE / 2 < (sizeof(prologue) + sizeof(epilogue) + 10 /* 2x call */ + MAX(3 /* default stub */, IOEMUL_QUIRK_STUB_BYTES) + - 1 /* ret */)); + (IS_ENABLED(CONFIG_RETURN_THUNK) ? 5 : 1) /* ret */)); /* Runtime confirmation that we haven't clobbered an adjacent stub. */ BUG_ON(STUB_BUF_SIZE / 2 < (p - ctxt->io_emul_stub)); diff --git a/xen/arch/x86/pv/gpr_switch.S b/xen/arch/x86/pv/gpr_switch.S index e7f5bfcd2d..bf830a78f8 100644 --- a/xen/arch/x86/pv/gpr_switch.S +++ b/xen/arch/x86/pv/gpr_switch.S @@ -26,12 +26,11 @@ ENTRY(load_guest_gprs) movq UREGS_r15(%rdi), %r15 movq UREGS_rcx(%rdi), %rcx movq UREGS_rdi(%rdi), %rdi - ret + RET .size load_guest_gprs, . - load_guest_gprs .type load_guest_gprs, STT_FUNC - /* Save guest GPRs. Parameter on the stack above the return address. */ ENTRY(save_guest_gprs) pushq %rdi @@ -51,7 +50,7 @@ ENTRY(save_guest_gprs) movq %rbx, UREGS_rbx(%rdi) movq %rdx, UREGS_rdx(%rdi) movq %rcx, UREGS_rcx(%rdi) - ret + RET .size save_guest_gprs, . 
- save_guest_gprs .type save_guest_gprs, STT_FUNC diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c index 51a66a144e..8daa28e1ea 100644 --- a/xen/arch/x86/spec_ctrl.c +++ b/xen/arch/x86/spec_ctrl.c @@ -569,6 +569,9 @@ static void __init print_details(enum ind_thunk thunk) #ifdef CONFIG_INDIRECT_THUNK " INDIRECT_THUNK" #endif +#ifdef CONFIG_RETURN_THUNK + " RETURN_THUNK" +#endif #ifdef CONFIG_SHADOW_PAGING " SHADOW_PAGING" #endif diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S index 3c544e9a14..dfa1152b60 100644 --- a/xen/arch/x86/x86_64/compat/entry.S +++ b/xen/arch/x86/x86_64/compat/entry.S @@ -183,7 +183,7 @@ ENTRY(cr4_pv32_restore) mov %rax, %cr4 mov %rax, (%rdx) pop %rdx - ret + RET 0: #ifndef NDEBUG /* Check that _all_ of the bits intended to be set actually are. */ @@ -202,7 +202,7 @@ ENTRY(cr4_pv32_restore) #endif pop %rdx xor %eax, %eax - ret + RET ENTRY(compat_syscall) /* Fix up reported %cs/%ss for compat domains. */ @@ -329,7 +329,7 @@ __UNLIKELY_END(compat_bounce_null_selector) xor %eax, %eax mov %ax, TRAPBOUNCE_cs(%rdx) mov %al, TRAPBOUNCE_flags(%rdx) - ret + RET .section .fixup,"ax" .Lfx13: diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S index df3f3b4ea7..ccf058ae55 100644 --- a/xen/arch/x86/x86_64/entry.S +++ b/xen/arch/x86/x86_64/entry.S @@ -598,7 +598,7 @@ __UNLIKELY_END(create_bounce_frame_bad_bounce_ip) xor %eax, %eax mov %rax, TRAPBOUNCE_eip(%rdx) mov %al, TRAPBOUNCE_flags(%rdx) - ret + RET .pushsection .fixup, "ax", @progbits # Numeric tags below represent the intended overall %rsi adjustment. diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S index 8930e14fc4..b66e708ebf 100644 --- a/xen/arch/x86/xen.lds.S +++ b/xen/arch/x86/xen.lds.S @@ -86,6 +86,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); _stextentry = .; *(.text.entry) + *(.text.entry.*) . 
= ALIGN(PAGE_SIZE); _etextentry = .; diff --git a/xen/common/Kconfig b/xen/common/Kconfig index 3361a6d892..2c103c2c47 100644 --- a/xen/common/Kconfig +++ b/xen/common/Kconfig @@ -127,6 +127,17 @@ config INDIRECT_THUNK When enabled, indirect branches are implemented using a new construct called "retpoline" that prevents speculation. +config RETURN_THUNK + bool "Out-of-line Returns" + depends on CC_HAS_RETURN_THUNK + default INDIRECT_THUNK + help + Compile Xen with out-of-line returns. + + This allows Xen to mitigate a variety of speculative vulnerabilities + by choosing a hardware-dependent instruction sequence to implement + function returns safely. + config SPECULATIVE_HARDEN_ARRAY bool "Speculative Array Hardening" default y -- generated by git-patchbot for /home/xen/git/xen.git#stable-4.18
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.