Re: [RFC PATCH 05/28] x86: Define the stack protector guard symbol explicitly
On Wed, Sep 25, 2024 at 8:02 AM Ard Biesheuvel <ardb+git@xxxxxxxxxx> wrote:
>
> From: Ard Biesheuvel <ardb@xxxxxxxxxx>
>
> Specify the guard symbol for the stack cookie explicitly, rather than
> positioning it exactly 40 bytes into the per-CPU area. Doing so removes
> the need for the per-CPU region to be absolute rather than relative to
> the placement of the per-CPU template region in the kernel image, and
> this allows the special handling for absolute per-CPU symbols to be
> removed entirely.
>
> This is a worthwhile cleanup in itself, but it is also a prerequisite
> for PIE codegen and PIE linking, which can replace our bespoke and
> rather clunky runtime relocation handling.
>
> Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
> ---
>  arch/x86/Makefile                     |  4 ++++
>  arch/x86/include/asm/init.h           |  2 +-
>  arch/x86/include/asm/processor.h      | 11 +++--------
>  arch/x86/include/asm/stackprotector.h |  4 ----
>  tools/perf/util/annotate.c            |  4 ++--
>  5 files changed, 10 insertions(+), 15 deletions(-)
>
> diff --git a/arch/x86/Makefile b/arch/x86/Makefile
> index 6b3fe6e2aadd..b78b7623a4a9 100644
> --- a/arch/x86/Makefile
> +++ b/arch/x86/Makefile
> @@ -193,6 +193,10 @@ else
>         KBUILD_RUSTFLAGS += -Cno-redzone=y
>         KBUILD_RUSTFLAGS += -Ccode-model=kernel
>
> +       ifeq ($(CONFIG_STACKPROTECTOR),y)
> +               KBUILD_CFLAGS += -mstack-protector-guard-symbol=fixed_percpu_data
> +       endif
> +
>         # Don't emit relaxable GOTPCREL relocations
>         KBUILD_AFLAGS_KERNEL += -Wa,-mrelax-relocations=no
>         KBUILD_CFLAGS_KERNEL += -Wa,-mrelax-relocations=no
> diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
> index 14d72727d7ee..3ed0e8ec973f 100644
> --- a/arch/x86/include/asm/init.h
> +++ b/arch/x86/include/asm/init.h
> @@ -2,7 +2,7 @@
>  #ifndef _ASM_X86_INIT_H
>  #define _ASM_X86_INIT_H
>
> -#define __head __section(".head.text")
> +#define __head __section(".head.text") __no_stack_protector
>
>  struct x86_mapping_info {
>         void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 4a686f0e5dbf..56bc36116814 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -402,14 +402,9 @@ struct irq_stack {
>  #ifdef CONFIG_X86_64
>  struct fixed_percpu_data {
>         /*
> -        * GCC hardcodes the stack canary as %gs:40. Since the
> -        * irq_stack is the object at %gs:0, we reserve the bottom
> -        * 48 bytes of the irq stack for the canary.
> -        *
> -        * Once we are willing to require -mstack-protector-guard-symbol=
> -        * support for x86_64 stackprotector, we can get rid of this.
> +        * Since the irq_stack is the object at %gs:0, the bottom 8 bytes of
> +        * the irq stack are reserved for the canary.
>          */
> -       char gs_base[40];
>         unsigned long stack_canary;
>  };
>
> @@ -418,7 +413,7 @@ DECLARE_INIT_PER_CPU(fixed_percpu_data);
>
>  static inline unsigned long cpu_kernelmode_gs_base(int cpu)
>  {
> -       return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
> +       return (unsigned long)&per_cpu(fixed_percpu_data, cpu);
>  }
>
>  extern asmlinkage void entry_SYSCALL32_ignore(void);
> diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
> index 00473a650f51..d1dcd22a0a4c 100644
> --- a/arch/x86/include/asm/stackprotector.h
> +++ b/arch/x86/include/asm/stackprotector.h
> @@ -51,10 +51,6 @@ static __always_inline void boot_init_stack_canary(void)
>  {
>         unsigned long canary = get_random_canary();
>
> -#ifdef CONFIG_X86_64
> -       BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
> -#endif
> -
>         current->stack_canary = canary;
>  #ifdef CONFIG_X86_64
>         this_cpu_write(fixed_percpu_data.stack_canary, canary);
> diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
> index 37ce43c4eb8f..7ecfedf5edb9 100644
> --- a/tools/perf/util/annotate.c
> +++ b/tools/perf/util/annotate.c
> @@ -2485,10 +2485,10 @@ static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
>
>  static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
>  {
> -       /* On x86_64, %gs:40 is used for stack canary */
> +       /* On x86_64, %gs:0 is used for stack canary */
>         if (arch__is(arch, "x86")) {
>                 if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
> -                   loc->offset == 40)
> +                   loc->offset == 0)

As a new perf tool can run on old kernels, we may need to have this be
something like:

  (loc->offset == 40 /* pre v6.xx kernels */ ||
   loc->offset == 0  /* v6.xx and later */)
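Spelled out as a complete function, that combined check might look like
the sketch below. Illustration only: it reuses perf's existing
struct annotated_op_loc, arch__is() and INSN_SEG_X86_GS, and "v6.xx"
remains a placeholder for whichever release this series lands in.

static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
{
	/*
	 * On x86_64 the stack canary is read via %gs: at offset 40 on
	 * kernels that predate this series, and at offset 0 once
	 * stack_canary becomes the first member of fixed_percpu_data.
	 */
	if (arch__is(arch, "x86")) {
		if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
		    (loc->offset == 40 ||	/* pre v6.xx kernels */
		     loc->offset == 0))		/* v6.xx and later */
			return true;
	}

	return false;
}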
We could make this dependent on the kernel by processing the
os_release string:
https://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/util/env.h#n55
but that could well be more trouble than it is worth.

Thanks,
Ian

>                         return true;
>         }
>
> --
> 2.46.0.792.g87dc391469-goog
>
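For reference, a rough sketch of that os_release idea. The helper below
is hypothetical (nothing like it exists in perf today), it assumes a
release string such as "6.11.0-rc5" as held in perf_env->os_release,
and the version cutoff is a placeholder for the unknown "v6.xx":

#include <stdio.h>

/* Placeholders for the "xx" in v6.xx -- the first release with the
 * canary at %gs:0 is not known yet. */
#define CANARY_AT_ZERO_MAJOR	6
#define CANARY_AT_ZERO_MINOR	99

/* Hypothetical helper: pick the expected %gs offset of the stack
 * canary from the kernel release string. */
static int stack_canary_gs_offset(const char *os_release)
{
	int major, minor;

	if (!os_release || sscanf(os_release, "%d.%d", &major, &minor) != 2)
		return 40;	/* cannot tell: assume the old layout */

	if (major > CANARY_AT_ZERO_MAJOR ||
	    (major == CANARY_AT_ZERO_MAJOR && minor >= CANARY_AT_ZERO_MINOR))
		return 0;	/* stack_canary at %gs:0 */

	return 40;		/* legacy hardcoded %gs:40 */
}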
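For background on what the new compiler flag changes, any function with
an addressable local buffer gets canary instrumentation under
CONFIG_STACKPROTECTOR. The sketch below is an illustration only; the
function is made up and the asm in the comment is approximate, not
taken from the patch:

#include <string.h>

/*
 * The prologue load of the canary is, roughly:
 *
 *   default x86_64 -mcmodel=kernel codegen:	movq %gs:40, %rax
 *   with -mstack-protector-guard-symbol=fixed_percpu_data:
 *						movq %gs:fixed_percpu_data, %rax
 *
 * Since the 40-byte offset is no longer baked into the compiler,
 * stack_canary can become the first member of fixed_percpu_data
 * (i.e. %gs:0) and the gs_base[40] padding can be dropped, which is
 * exactly what the patch does.
 */
int copy_name(char *dst, size_t dst_len, const char *src)
{
	char buf[64];	/* local array triggers stack-protector */

	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	strncpy(dst, buf, dst_len);
	return 0;
}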