Re: [PATCH v4 08/16] x86/stackprotector/64: Convert to normal percpu variable

From: Uros Bizjak
Date: Sat Mar 23 2024 - 13:11:58 EST


On Fri, Mar 22, 2024 at 5:52 PM Brian Gerst <brgerst@xxxxxxxxx> wrote:
>
> Older versions of GCC fixed the location of the stack protector canary
> at %gs:40. This constraint forced the percpu section to be linked at
> virtual address 0 so that the canary could be the first data object in
> the percpu section. Supporting the zero-based percpu section requires
> additional code to handle relocations for RIP-relative references to
> percpu data, extra complexity to kallsyms, and workarounds for linker
> bugs due to the use of absolute symbols.
>
> Use compiler options to redefine the stack protector location if
> supported, otherwise use objtool. This will remove the constraint that
> the percpu section must be zero-based.
>
> Signed-off-by: Brian Gerst <brgerst@xxxxxxxxx>

The HAVE_STACKPROTECTOR change is not needed without STACKPROTECTOR_OBJTOOL support.

With the above remark:

Reviewed-by: Uros Bizjak <ubizjak@xxxxxxxxx>

> ---
> arch/x86/Kconfig | 11 ++++----
> arch/x86/Makefile | 21 ++++++++++------
> arch/x86/entry/entry_64.S | 2 +-
> arch/x86/include/asm/processor.h | 16 ++----------
> arch/x86/include/asm/stackprotector.h | 36 ++++-----------------------
> arch/x86/kernel/asm-offsets_64.c | 6 -----
> arch/x86/kernel/cpu/common.c | 5 +---
> arch/x86/kernel/head_64.S | 3 +--
> arch/x86/xen/xen-head.S | 3 +--
> 9 files changed, 30 insertions(+), 73 deletions(-)
>
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 121cfb9ffc0e..3dbefdb8a5d6 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -271,7 +271,7 @@ config X86
> select HAVE_FUNCTION_ARG_ACCESS_API
> select HAVE_SETUP_PER_CPU_AREA
> select HAVE_SOFTIRQ_ON_OWN_STACK
> - select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
> + select HAVE_STACKPROTECTOR if X86_64 || CC_HAS_SANE_STACKPROTECTOR
> select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
> select HAVE_STATIC_CALL
> select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
> @@ -411,15 +411,14 @@ config PGTABLE_LEVELS
>
> config CC_HAS_SANE_STACKPROTECTOR
> bool
> - default y if 64BIT
> + default $(cc-option,-mstack-protector-guard-reg=gs -mstack-protector-guard-symbol=__stack_chk_guard) if 64BIT
> default $(cc-option,-mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard)
> - help
> - We have to make sure stack protector is unconditionally disabled if
> - the compiler does not allow control of the segment and symbol.
>
> config STACKPROTECTOR_OBJTOOL
> bool
> - default n
> + depends on X86_64 && STACKPROTECTOR
> + default !CC_HAS_SANE_STACKPROTECTOR
> + prompt "Debug objtool stack protector conversion" if CC_HAS_SANE_STACKPROTECTOR && DEBUG_KERNEL
>
> menu "Processor type and features"
>
> diff --git a/arch/x86/Makefile b/arch/x86/Makefile
> index 662d9d4033e6..2a3ba1abb802 100644
> --- a/arch/x86/Makefile
> +++ b/arch/x86/Makefile
> @@ -116,13 +116,7 @@ ifeq ($(CONFIG_X86_32),y)
> # temporary until string.h is fixed
> KBUILD_CFLAGS += -ffreestanding
>
> - ifeq ($(CONFIG_STACKPROTECTOR),y)
> - ifeq ($(CONFIG_SMP),y)
> - KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
> - else
> - KBUILD_CFLAGS += -mstack-protector-guard=global
> - endif
> - endif
> + percpu_seg := fs
> else
> BITS := 64
> UTS_MACHINE := x86_64
> @@ -172,6 +166,19 @@ else
> KBUILD_CFLAGS += -mcmodel=kernel
> KBUILD_RUSTFLAGS += -Cno-redzone=y
> KBUILD_RUSTFLAGS += -Ccode-model=kernel
> +
> + percpu_seg := gs
> +endif
> +
> +ifeq ($(CONFIG_STACKPROTECTOR),y)
> + ifneq ($(CONFIG_STACKPROTECTOR_OBJTOOL),y)
> + ifeq ($(CONFIG_SMP),y)
> + KBUILD_CFLAGS += -mstack-protector-guard-reg=$(percpu_seg)
> + KBUILD_CFLAGS += -mstack-protector-guard-symbol=__stack_chk_guard
> + else
> + KBUILD_CFLAGS += -mstack-protector-guard=global
> + endif
> + endif
> endif
>
> #
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index 8af2a26b24f6..9478ff768dd0 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -191,7 +191,7 @@ SYM_FUNC_START(__switch_to_asm)
>
> #ifdef CONFIG_STACKPROTECTOR
> movq TASK_stack_canary(%rsi), %rbx
> - movq %rbx, PER_CPU_VAR(fixed_percpu_data + FIXED_stack_canary)
> + movq %rbx, PER_CPU_VAR(__stack_chk_guard)
> #endif
>
> /*
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 89ed5237e79f..946bebce396f 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -387,16 +387,8 @@ struct irq_stack {
>
> #ifdef CONFIG_X86_64
> struct fixed_percpu_data {
> - /*
> - * GCC hardcodes the stack canary as %gs:40. Since the
> - * irq_stack is the object at %gs:0, we reserve the bottom
> - * 48 bytes of the irq stack for the canary.
> - *
> - * Once we are willing to require -mstack-protector-guard-symbol=
> - * support for x86_64 stackprotector, we can get rid of this.
> - */
> char gs_base[40];
> - unsigned long stack_canary;
> + unsigned long reserved;
> };
>
> DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
> @@ -411,11 +403,7 @@ extern asmlinkage void entry_SYSCALL32_ignore(void);
>
> /* Save actual FS/GS selectors and bases to current->thread */
> void current_save_fsgs(void);
> -#else /* X86_64 */
> -#ifdef CONFIG_STACKPROTECTOR
> -DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
> -#endif
> -#endif /* !X86_64 */
> +#endif /* X86_64 */
>
> struct perf_event;
>
> diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
> index 00473a650f51..d43fb589fcf6 100644
> --- a/arch/x86/include/asm/stackprotector.h
> +++ b/arch/x86/include/asm/stackprotector.h
> @@ -2,26 +2,10 @@
> /*
> * GCC stack protector support.
> *
> - * Stack protector works by putting predefined pattern at the start of
> + * Stack protector works by putting a predefined pattern at the start of
> * the stack frame and verifying that it hasn't been overwritten when
> - * returning from the function. The pattern is called stack canary
> - * and unfortunately gcc historically required it to be at a fixed offset
> - * from the percpu segment base. On x86_64, the offset is 40 bytes.
> - *
> - * The same segment is shared by percpu area and stack canary. On
> - * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
> - * base of percpu area. The first occupant of the percpu area is always
> - * fixed_percpu_data which contains stack_canary at the appropriate
> - * offset. On x86_32, the stack canary is just a regular percpu
> - * variable.
> - *
> - * Putting percpu data in %fs on 32-bit is a minor optimization compared to
> - * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely
> - * to load 0 into %fs on exit to usermode, whereas with percpu data in
> - * %gs, we are likely to load a non-null %gs on return to user mode.
> - *
> - * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
> - * support, we can remove some of this complexity.
> + * returning from the function. The pattern is called the stack canary
> + * and is a unique value for each task.
> */
>
> #ifndef _ASM_STACKPROTECTOR_H
> @@ -36,6 +20,8 @@
>
> #include <linux/sched.h>
>
> +DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
> +
> /*
> * Initialize the stackprotector canary value.
> *
> @@ -51,25 +37,13 @@ static __always_inline void boot_init_stack_canary(void)
> {
> unsigned long canary = get_random_canary();
>
> -#ifdef CONFIG_X86_64
> - BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
> -#endif
> -
> current->stack_canary = canary;
> -#ifdef CONFIG_X86_64
> - this_cpu_write(fixed_percpu_data.stack_canary, canary);
> -#else
> this_cpu_write(__stack_chk_guard, canary);
> -#endif
> }
>
> static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
> {
> -#ifdef CONFIG_X86_64
> - per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
> -#else
> per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
> -#endif
> }
>
> #else /* STACKPROTECTOR */
> diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
> index bb65371ea9df..590b6cd0eac0 100644
> --- a/arch/x86/kernel/asm-offsets_64.c
> +++ b/arch/x86/kernel/asm-offsets_64.c
> @@ -54,11 +54,5 @@ int main(void)
> BLANK();
> #undef ENTRY
>
> - BLANK();
> -
> -#ifdef CONFIG_STACKPROTECTOR
> - OFFSET(FIXED_stack_canary, fixed_percpu_data, stack_canary);
> - BLANK();
> -#endif
> return 0;
> }
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index 9a34651d24e7..f49e8f5b858d 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -2063,16 +2063,13 @@ void syscall_init(void)
> if (!cpu_feature_enabled(X86_FEATURE_FRED))
> idt_syscall_init();
> }
> -
> -#else /* CONFIG_X86_64 */
> +#endif /* CONFIG_X86_64 */
>
> #ifdef CONFIG_STACKPROTECTOR
> DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
> EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
> #endif
>
> -#endif /* CONFIG_X86_64 */
> -
> /*
> * Clear all 6 debug registers:
> */
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index b11526869a40..cfbf0486d424 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -361,8 +361,7 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
>
> /* Set up %gs.
> *
> - * The base of %gs always points to fixed_percpu_data. If the
> - * stack protector canary is enabled, it is located at %gs:40.
> + * The base of %gs always points to fixed_percpu_data.
> * Note that, on SMP, the boot cpu uses init data section until
> * the per cpu areas are set up.
> */
> diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
> index 758bcd47b72d..ae4672ea00bb 100644
> --- a/arch/x86/xen/xen-head.S
> +++ b/arch/x86/xen/xen-head.S
> @@ -53,8 +53,7 @@ SYM_CODE_START(startup_xen)
>
> /* Set up %gs.
> *
> - * The base of %gs always points to fixed_percpu_data. If the
> - * stack protector canary is enabled, it is located at %gs:40.
> + * The base of %gs always points to fixed_percpu_data.
> * Note that, on SMP, the boot cpu uses init data section until
> * the per cpu areas are set up.
> */
> --
> 2.44.0
>