[PATCH 2/4] x86/altcall: Optimise away endbr64 instruction where possible
With altcall, we convert indirect branches into direct ones.  With that
complete, none of the potential targets need an endbr64 instruction.

Furthermore, removing the endbr64 instructions is a security defence-in-depth
improvement, because it limits the options available to an attacker who has
managed to hijack a function pointer.

Introduce a new .init.data.cf_clobber section.  Have _apply_alternatives()
walk over the entire section, looking for any pointers into .text, and
clobber an endbr64 instruction if found.  This is some minor structure
(ab)use but it works alarmingly well.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>

It would be nice for the printk() to say "optimised away %u of %u", but the
latter number can only feasibly come from post-processing of xen-syms during
the build.
---
 xen/arch/x86/alternative.c | 38 ++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/xen.lds.S     |  5 +++++
 xen/include/xen/init.h     |  2 ++
 3 files changed, 45 insertions(+)

diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c
index 5ae4c80d5119..65fc8534b97f 100644
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -173,6 +173,9 @@ text_poke(void *addr, const void *opcode, size_t len)
     return memcpy(addr, opcode, len);
 }
 
+extern unsigned long __initdata_cf_clobber_start[];
+extern unsigned long __initdata_cf_clobber_end[];
+
 /*
  * Replace instructions with better alternatives for this CPU type.
  * This runs before SMP is initialized to avoid SMP problems with
@@ -329,6 +332,41 @@ static void init_or_livepatch _apply_alternatives(struct alt_instr *start,
         add_nops(buf + a->repl_len, total_len - a->repl_len);
         text_poke(orig, buf, total_len);
     }
+
+    /*
+     * Clobber endbr64 instructions now that altcall has finished optimising
+     * all indirect branches to direct ones.
+     */
+    if ( force && cpu_has_xen_ibt )
+    {
+        unsigned long *val;
+        unsigned int clobbered = 0;
+
+        /*
+         * This is some minor structure (ab)use.  We walk the entire contents
+         * of .init.data.cf_clobber as if it were an array of pointers.
+         *
+         * If the pointer points into .text, and has an endbr64 instruction,
+         * nop out the endbr64.  This causes the pointer to no longer be a
+         * legal indirect branch target under CET-IBT.  This is a
+         * defence-in-depth measure, to reduce the options available to an
+         * adversary who has managed to hijack a function pointer.
+         */
+        for ( val = __initdata_cf_clobber_start;
+              val < __initdata_cf_clobber_end;
+              val++ )
+        {
+            void *ptr = (void *)*val;
+
+            if ( !is_kernel_text(ptr) || !is_endbr64(ptr) )
+                continue;
+
+            add_nops(ptr, 4);
+            clobbered++;
+        }
+
+        printk("altcall: Optimised away %u endbr64 instructions\n", clobbered);
+    }
 }
 
 void init_or_livepatch apply_alternatives(struct alt_instr *start,
diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S
index 87e344d4dd97..5b16a98e4df1 100644
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -214,6 +214,11 @@ SECTIONS
        *(.initcall1.init)
        __initcall_end = .;
 
+       . = ALIGN(POINTER_ALIGN);
+       __initdata_cf_clobber_start = .;
+       *(.init.data.cf_clobber)
+       __initdata_cf_clobber_end = .;
+
        *(.init.data)
        *(.init.data.rel)
        *(.init.data.rel.*)
diff --git a/xen/include/xen/init.h b/xen/include/xen/init.h
index bfe789e93f6b..66b324892a52 100644
--- a/xen/include/xen/init.h
+++ b/xen/include/xen/init.h
@@ -18,6 +18,8 @@
 #define __init_call(lvl)       __used_section(".initcall" lvl ".init")
 #define __exit_call            __used_section(".exitcall.exit")
 
+#define __initdata_cf_clobber  __section(".init.data.cf_clobber")
+
 /* These macros are used to mark some functions or
  * initialized data (doesn't apply to uninitialized data)
  * as `initialization' functions. The kernel can take this
-- 
2.11.0
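
For illustration only (not part of the patch; the names are hypothetical): a
consumer of the new __initdata_cf_clobber annotation might tag a table of
function pointers roughly as below, assuming foo_setup() and foo_teardown()
are built with CET-IBT and therefore start with an endbr64.
_apply_alternatives() then reads the tagged object as an array of unsigned
longs, finds the two pointers into .text, and NOPs out the endbr64 at each
target.

    /* Hypothetical usage sketch, not part of this patch. */
    struct foo_ops {
        void (*setup)(void);
        void (*teardown)(void);
    };

    void foo_setup(void);     /* assumed to begin with endbr64 */
    void foo_teardown(void);  /* assumed to begin with endbr64 */

    /*
     * Tagging the object places it in .init.data.cf_clobber, so
     * _apply_alternatives() will scan its contents and clobber the
     * endbr64 at the start of foo_setup() and foo_teardown().
     */
    static struct foo_ops __initdata_cf_clobber foo_ops = {
        .setup    = foo_setup,
        .teardown = foo_teardown,
    };

Calls through such a table would need to go via the altcall machinery (e.g.
alternative_vcall()) so that, by the time the endbr64 is clobbered, no
indirect branch to those functions remains.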