[PATCH v2] x86/stackprotector: Pre-initialize canary for secondary CPUs

From: Brian Gerst
Date: Wed Jun 17 2020 - 18:56:36 EST


The idle tasks created for each secondary CPU already have a random stack
canary generated by fork(). Copy the canary to the percpu variable before
starting the secondary CPU, which removes the need to call
boot_init_stack_canary().

Signed-off-by: Brian Gerst <brgerst@xxxxxxxxx>
---

V2: Fixed stack protector disabled case
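
For reference (not part of this patch): the fork()-time canary mentioned
in the changelog is assigned in dup_task_struct(), and get_random_canary()
is a masked get_random_long(). A condensed sketch of the relevant
kernel/fork.c and include/linux/random.h code (details may vary between
kernel versions):

	/* kernel/fork.c: dup_task_struct(), excerpt */
	#ifdef CONFIG_STACKPROTECTOR
		tsk->stack_canary = get_random_canary();
	#endif

	/*
	 * include/linux/random.h: CANARY_MASK keeps a zero byte in the
	 * canary on 64-bit kernels, which hardens against string-based
	 * stack overflows running through the canary.
	 */
	static inline unsigned long get_random_canary(void)
	{
		unsigned long val = get_random_long();

		return val & CANARY_MASK;
	}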

 arch/x86/include/asm/stackprotector.h | 12 ++++++++++++
 arch/x86/kernel/smpboot.c             | 14 ++------------
 arch/x86/xen/smp_pv.c                 |  2 --
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 9804a7957f4e..7fb482f0f25b 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -90,6 +90,15 @@ static __always_inline void boot_init_stack_canary(void)
 #endif
 }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_X86_64
+	per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
+#else
+	per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
+#endif
+}
+
 static inline void setup_stack_canary_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -119,6 +128,9 @@ static inline void load_stack_canary_segment(void)
 static inline void setup_stack_canary_segment(int cpu)
 { }
 
+static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
+{ }
+
 static inline void load_stack_canary_segment(void)
 {
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ffbd9a3d78d8..a11bd53c6911 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -51,7 +51,6 @@
 #include <linux/err.h>
 #include <linux/nmi.h>
 #include <linux/tboot.h>
-#include <linux/stackprotector.h>
 #include <linux/gfp.h>
 #include <linux/cpuidle.h>
 #include <linux/numa.h>
@@ -80,6 +79,7 @@
 #include <asm/cpu_device_id.h>
 #include <asm/spec-ctrl.h>
 #include <asm/hw_irq.h>
+#include <asm/stackprotector.h>
 
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
@@ -259,21 +259,10 @@ static void notrace start_secondary(void *unused)
 	/* enable local interrupts */
 	local_irq_enable();
 
-	/* to prevent fake stack check failure in clock setup */
-	boot_init_stack_canary();
-
 	x86_cpuinit.setup_percpu_clockev();
 
 	wmb();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-
-	/*
-	 * Prevent tail call to cpu_startup_entry() because the stack protector
-	 * guard has been changed a couple of function calls up, in
-	 * boot_init_stack_canary() and must not be checked before tail calling
-	 * another function.
-	 */
-	prevent_tail_call_optimization();
 }
 
 /**
@@ -1011,6 +1000,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
 	alternatives_enable_smp();
 
 	per_cpu(current_task, cpu) = idle;
+	cpu_init_stack_canary(cpu, idle);
 
 	/* Initialize the interrupt stack(s) */
 	ret = irq_init_percpu_irqstack(cpu);
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 171aff1b11f2..9ea598dcc132 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -92,9 +92,7 @@ static void cpu_bringup(void)
 asmlinkage __visible void cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
-	boot_init_stack_canary();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-	prevent_tail_call_optimization();
 }
 
 void xen_smp_intr_free_pv(unsigned int cpu)

base-commit: 83cdaef93988a6bc6875623781de571b2694fe02
--
2.26.2