[xen staging-4.17] x86/thunk: Build Xen with Return Thunks
commit 3c31a87bcbdf04fda9019d46236436ead53b0c1e
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Apr 7 17:15:17 2025 +0200
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon May 12 17:32:24 2025 +0100

    x86/thunk: Build Xen with Return Thunks

    The Indirect Target Selection speculative vulnerability means that
    indirect branches (including RETs) are unsafe when in the first half
    of a cacheline.

    In order to mitigate this, build with return thunks and arrange for
    __x86_return_thunk to be (mis)aligned in the same manner as
    __x86_indirect_thunk_* so the RET instruction is placed in a safe
    location.

    place_ret() needs to conditionally emit JMP __x86_return_thunk instead
    of RET.

    This is part of XSA-469 / CVE-2024-28956

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    (cherry picked from commit afcb4a06c740f7f71d2e9746c9d147c38a6e6c90)
---
 xen/arch/x86/Kconfig                 |  5 +++++
 xen/arch/x86/Makefile                |  1 +
 xen/arch/x86/acpi/wakeup_prot.S      |  2 +-
 xen/arch/x86/alternative.c           | 31 ++++++++++++++++++++++++++++++-
 xen/arch/x86/arch.mk                 |  3 +++
 xen/arch/x86/bhb-thunk.S             |  2 +-
 xen/arch/x86/clear_page.S            |  3 ++-
 xen/arch/x86/copy_page.S             |  3 ++-
 xen/arch/x86/efi/check.c             |  3 +++
 xen/arch/x86/include/asm/asm-defns.h |  6 ++++++
 xen/arch/x86/indirect-thunk.S        | 27 +++++++++++++++++++++++++++
 xen/arch/x86/pv/emul-priv-op.c       |  2 +-
 xen/arch/x86/pv/gpr_switch.S         |  4 ++--
 xen/arch/x86/spec_ctrl.c             |  3 +++
 xen/arch/x86/x86_64/compat/entry.S   |  6 +++---
 xen/arch/x86/x86_64/entry.S          |  2 +-
 xen/arch/x86/xen.lds.S               |  1 +
 xen/common/Kconfig                   | 11 +++++++++++
 18 files changed, 103 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
index 471cfd8a80..370558756f 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -35,9 +35,14 @@ config ARCH_DEFCONFIG
 	default "arch/x86/configs/x86_64_defconfig"
 
 config CC_HAS_INDIRECT_THUNK
+	# GCC >= 8 or Clang >= 6
 	def_bool $(cc-option,-mindirect-branch-register) || \
 		 $(cc-option,-mretpoline-external-thunk)
 
+config CC_HAS_RETURN_THUNK
+	# GCC >= 8 or Clang >= 15
+	def_bool $(cc-option,-mfunction-return=thunk-extern)
+
 config HAS_AS_CET_SS
 	# binutils >= 2.29 or LLVM >= 6
 	def_bool $(as-instr,wrssq %rax$(comma)0;setssbsy)
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 882408ccaf..22293d969b 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o
 obj-y += msi.o
 obj-y += msr.o
 obj-$(CONFIG_INDIRECT_THUNK) += indirect-thunk.o
+obj-$(CONFIG_RETURN_THUNK) += indirect-thunk.o
 obj-$(CONFIG_PV) += ioport_emulate.o
 obj-y += irq.o
 obj-$(CONFIG_KEXEC) += machine_kexec.o
diff --git a/xen/arch/x86/acpi/wakeup_prot.S b/xen/arch/x86/acpi/wakeup_prot.S
index 3855ff1ddb..cd4682b9f9 100644
--- a/xen/arch/x86/acpi/wakeup_prot.S
+++ b/xen/arch/x86/acpi/wakeup_prot.S
@@ -131,7 +131,7 @@ ENTRY(s3_resume)
         pop %r12
         pop %rbx
         pop %rbp
-        ret
+        RET
 
         .data
         .align 16
diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c
index 88082f68a9..151df73583 100644
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -149,16 +149,45 @@ void init_or_livepatch add_nops(void *insns, unsigned int len)
     }
 }
 
+void nocall __x86_return_thunk(void);
+
 /*
  * Place a return at @ptr. @ptr must be in the writable alias of a stub.
  *
+ * When CONFIG_RETURN_THUNK is active, this may be a JMP __x86_return_thunk
+ * instead, depending on the safety of @ptr with respect to Indirect Target
+ * Selection.
+ *
  * Returns the next position to write into the stub.
  */
 void *place_ret(void *ptr)
 {
+    unsigned long addr = (unsigned long)ptr;
     uint8_t *p = ptr;
 
-    *p++ = 0xc3;
+    /*
+     * When Return Thunks are used, if a RET would be unsafe at this location
+     * with respect to Indirect Target Selection (i.e. if addr is in the first
+     * half of a cacheline), insert a JMP __x86_return_thunk instead.
+     *
+     * The displacement needs to be relative to the executable alias of the
+     * stub, not to @ptr which is the writeable alias.
+     */
+    if ( IS_ENABLED(CONFIG_RETURN_THUNK) && !(addr & 0x20) )
+    {
+        long stub_va = (this_cpu(stubs.addr) & PAGE_MASK) + (addr & ~PAGE_MASK);
+        long disp = (long)__x86_return_thunk - (stub_va + 5);
+
+        BUG_ON((int32_t)disp != disp);
+
+        *p++ = 0xe9;
+        *(int32_t *)p = disp;
+        p += 4;
+    }
+    else
+    {
+        *p++ = 0xc3;
+    }
 
     return p;
 }
diff --git a/xen/arch/x86/arch.mk b/xen/arch/x86/arch.mk
index 227d439a45..379317c013 100644
--- a/xen/arch/x86/arch.mk
+++ b/xen/arch/x86/arch.mk
@@ -50,6 +50,9 @@ CFLAGS-$(CONFIG_CC_IS_GCC) += -fno-jump-tables
 CFLAGS-$(CONFIG_CC_IS_CLANG) += -mretpoline-external-thunk
 endif
 
+# Compile with return thunk support if selected.
+CFLAGS-$(CONFIG_RETURN_THUNK) += -mfunction-return=thunk-extern
+
 ifdef CONFIG_XEN_IBT
 # Force -fno-jump-tables to work around
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104816
diff --git a/xen/arch/x86/bhb-thunk.S b/xen/arch/x86/bhb-thunk.S
index 05f1043df7..472da481dd 100644
--- a/xen/arch/x86/bhb-thunk.S
+++ b/xen/arch/x86/bhb-thunk.S
@@ -23,7 +23,7 @@ ENTRY(clear_bhb_tsx)
 0:      .byte 0xc6, 0xf8, 0     /* xabort $0 */
         int3
 1:
-        ret
+        RET
 
         .size clear_bhb_tsx, . - clear_bhb_tsx
         .type clear_bhb_tsx, @function
diff --git a/xen/arch/x86/clear_page.S b/xen/arch/x86/clear_page.S
index d9d524c79e..ea70bd9167 100644
--- a/xen/arch/x86/clear_page.S
+++ b/xen/arch/x86/clear_page.S
@@ -1,5 +1,6 @@
         .file __FILE__
 
+#include <asm/asm_defns.h>
 #include <asm/page.h>
 
 ENTRY(clear_page_sse2)
@@ -15,4 +16,4 @@ ENTRY(clear_page_sse2)
         jnz     0b
 
         sfence
-        ret
+        RET
diff --git a/xen/arch/x86/copy_page.S b/xen/arch/x86/copy_page.S
index 2da81126c5..bb79c5fc79 100644
--- a/xen/arch/x86/copy_page.S
+++ b/xen/arch/x86/copy_page.S
@@ -1,5 +1,6 @@
         .file __FILE__
 
+#include <asm/asm_defns.h>
 #include <asm/page.h>
 
 #define src_reg %rsi
@@ -40,4 +41,4 @@ ENTRY(copy_page_sse2)
         movnti  tmp4_reg, 3*WORD_SIZE(dst_reg)
 
         sfence
-        ret
+        RET
diff --git a/xen/arch/x86/efi/check.c b/xen/arch/x86/efi/check.c
index 9e473faad3..23ba30abf3 100644
--- a/xen/arch/x86/efi/check.c
+++ b/xen/arch/x86/efi/check.c
@@ -3,6 +3,9 @@ int __attribute__((__ms_abi__)) test(int i)
     return i;
 }
 
+/* In case -mfunction-return is in use. */
+void __x86_return_thunk(void) {};
+
 /*
  * Populate an array with "addresses" of relocatable and absolute values.
  * This is to probe ld for (a) emitting base relocations at all and (b) not
diff --git a/xen/arch/x86/include/asm/asm-defns.h b/xen/arch/x86/include/asm/asm-defns.h
index 7e22fcb9c0..a9ca0d05ec 100644
--- a/xen/arch/x86/include/asm/asm-defns.h
+++ b/xen/arch/x86/include/asm/asm-defns.h
@@ -47,6 +47,12 @@
     .endif
 .endm
 
+#ifdef CONFIG_RETURN_THUNK
+# define RET jmp __x86_return_thunk
+#else
+# define RET ret
+#endif
+
 #ifdef CONFIG_XEN_IBT
 # define ENDBR64 endbr64
 #else
diff --git a/xen/arch/x86/indirect-thunk.S b/xen/arch/x86/indirect-thunk.S
index e7ef104d3b..239cf7dc77 100644
--- a/xen/arch/x86/indirect-thunk.S
+++ b/xen/arch/x86/indirect-thunk.S
@@ -11,6 +11,9 @@
 
 #include <asm/asm_defns.h>
 
+
+#ifdef CONFIG_INDIRECT_THUNK
+
 .macro IND_THUNK_RETPOLINE reg:req
         call 1f
         int3
@@ -60,3 +63,27 @@ ENTRY(__x86_indirect_thunk_\reg)
 .irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15
         GEN_INDIRECT_THUNK reg=r\reg
 .endr
+
+#endif /* CONFIG_INDIRECT_THUNK */
+
+#ifdef CONFIG_RETURN_THUNK
+        .section .text.entry.__x86_return_thunk, "ax", @progbits
+
+        /*
+         * The Indirect Target Selection speculative vulnerability means that
+         * indirect branches (including RETs) are unsafe when in the first
+         * half of a cacheline. Arrange for them to be in the second half.
+         *
+         * Align to 64, then skip 32.
+         */
+        .balign 64
+        .fill 32, 1, 0xcc
+
+ENTRY(__x86_return_thunk)
+        ret
+        int3 /* Halt straight-line speculation */
+
+        .size __x86_return_thunk, . - __x86_return_thunk
+        .type __x86_return_thunk, @function
+
+#endif /* CONFIG_RETURN_THUNK */
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 28ea7cc580..872a89db76 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -143,7 +143,7 @@ static io_emul_stub_t *io_emul_stub_setup(struct priv_op_ctxt *ctxt, u8 opcode,
     BUILD_BUG_ON(STUB_BUF_SIZE / 2 <
                  (sizeof(prologue) + sizeof(epilogue) + 10 /* 2x call */ +
                   MAX(3 /* default stub */, IOEMUL_QUIRK_STUB_BYTES) +
-                  1 /* ret */));
+                  (IS_ENABLED(CONFIG_RETURN_THUNK) ? 5 : 1) /* ret */));
 
     /* Runtime confirmation that we haven't clobbered an adjacent stub. */
     BUG_ON(STUB_BUF_SIZE / 2 < (p - ctxt->io_emul_stub));
diff --git a/xen/arch/x86/pv/gpr_switch.S b/xen/arch/x86/pv/gpr_switch.S
index e7f5bfcd2d..d90435be88 100644
--- a/xen/arch/x86/pv/gpr_switch.S
+++ b/xen/arch/x86/pv/gpr_switch.S
@@ -26,7 +26,7 @@ ENTRY(load_guest_gprs)
         movq UREGS_r15(%rdi), %r15
         movq UREGS_rcx(%rdi), %rcx
         movq UREGS_rdi(%rdi), %rdi
-        ret
+        RET
 
         .size load_guest_gprs, . - load_guest_gprs
         .type load_guest_gprs, STT_FUNC
@@ -51,7 +51,7 @@ ENTRY(save_guest_gprs)
         movq %rbx, UREGS_rbx(%rdi)
         movq %rdx, UREGS_rdx(%rdi)
         movq %rcx, UREGS_rcx(%rdi)
-        ret
+        RET
 
         .size save_guest_gprs, . - save_guest_gprs
         .type save_guest_gprs, STT_FUNC
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index b1e47a849e..2f777e8a7e 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -575,6 +575,9 @@ static void __init print_details(enum ind_thunk thunk)
 #ifdef CONFIG_INDIRECT_THUNK
            " INDIRECT_THUNK"
 #endif
+#ifdef CONFIG_RETURN_THUNK
+           " RETURN_THUNK"
+#endif
 #ifdef CONFIG_SHADOW_PAGING
            " SHADOW_PAGING"
 #endif
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index ff462a92e0..57e54dc75f 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -183,7 +183,7 @@ ENTRY(cr4_pv32_restore)
         mov %rax, %cr4
         mov %rax, (%rdx)
         pop %rdx
-        ret
+        RET
 0:
 #ifndef NDEBUG
         /* Check that _all_ of the bits intended to be set actually are. */
@@ -202,7 +202,7 @@ ENTRY(cr4_pv32_restore)
 #endif
         pop %rdx
         xor %eax, %eax
-        ret
+        RET
 
 ENTRY(compat_syscall)
         /* Fix up reported %cs/%ss for compat domains. */
@@ -329,7 +329,7 @@ __UNLIKELY_END(compat_bounce_null_selector)
         xor %eax, %eax
         mov %ax, TRAPBOUNCE_cs(%rdx)
         mov %al, TRAPBOUNCE_flags(%rdx)
-        ret
+        RET
 
         .section .fixup,"ax"
 .Lfx13:
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 7bb0cc708a..fd63ee2e4c 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -598,7 +598,7 @@ __UNLIKELY_END(create_bounce_frame_bad_bounce_ip)
         xor %eax, %eax
         mov %rax, TRAPBOUNCE_eip(%rdx)
         mov %al, TRAPBOUNCE_flags(%rdx)
-        ret
+        RET
 
         .pushsection .fixup, "ax", @progbits
         # Numeric tags below represent the intended overall %rsi adjustment.
diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S
index 8930e14fc4..b66e708ebf 100644
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -86,6 +86,7 @@ SECTIONS
        . = ALIGN(PAGE_SIZE);
        _stextentry = .;
        *(.text.entry)
+       *(.text.entry.*)
        . = ALIGN(PAGE_SIZE);
        _etextentry = .;
 
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index cd73851538..c82bee92f4 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -112,6 +112,17 @@ config INDIRECT_THUNK
 	  When enabled, indirect branches are implemented using a new construct
 	  called "retpoline" that prevents speculation.
 
+config RETURN_THUNK
+	bool "Out-of-line Returns"
+	depends on CC_HAS_RETURN_THUNK
+	default INDIRECT_THUNK
+	help
+	  Compile Xen with out-of-line returns.
+
+	  This allows Xen to mitigate a variety of speculative vulnerabilities
+	  by choosing a hardware-dependent instruction sequence to implement
+	  function returns safely.
+
 config SPECULATIVE_HARDEN_ARRAY
 	bool "Speculative Array Hardening"
 	default y
--
generated by git-patchbot for /home/xen/git/xen.git#staging-4.17
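[Editorial illustration, not part of the patch.] The place_ret() change above emits either a plain 1-byte RET (0xc3), when the write location already sits in the second half of a 64-byte cacheline, or a 5-byte JMP rel32 (0xe9 + disp32) to __x86_return_thunk otherwise. The following minimal standalone C sketch mirrors that encoding decision; the helper name place_ret_sketch() and the addresses are hypothetical, and the real Xen code additionally translates the writeable stub alias to its executable alias via this_cpu(stubs.addr).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t place_ret_sketch(uint8_t *buf, uint64_t exec_va, uint64_t thunk_va)
{
    /*
     * Bit 5 of the address distinguishes the two halves of a 64-byte
     * cacheline: clear means first half, which is unsafe for a bare RET
     * under Indirect Target Selection.
     */
    if ( !(exec_va & 0x20) )
    {
        /* JMP rel32 is 5 bytes; the displacement is relative to the next insn. */
        int64_t disp = (int64_t)(thunk_va - (exec_va + 5));
        int32_t d32 = (int32_t)disp;

        assert(d32 == disp);            /* Mirrors the BUG_ON() in the patch. */

        buf[0] = 0xe9;                  /* JMP rel32 opcode */
        memcpy(&buf[1], &d32, sizeof(d32));
        return 5;
    }

    buf[0] = 0xc3;                      /* A plain RET is already safe here. */
    return 1;
}

int main(void)
{
    uint8_t stub[8];
    /* Hypothetical stub and thunk addresses, for illustration only. */
    size_t len = place_ret_sketch(stub, 0xffff82d040200000ull,
                                  0xffff82d040201040ull);

    printf("emitted %zu byte(s), opcode 0x%02x\n", len, stub[0]);
    return 0;
}

The 5-vs-1 byte difference is also why the BUILD_BUG_ON() in io_emul_stub_setup() now reserves (IS_ENABLED(CONFIG_RETURN_THUNK) ? 5 : 1) bytes for the trailing return.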