[patch V2 32/37] x86/microcode: Rendezvous and load in NMI
From: Thomas Gleixner
Date: Sat Aug 12 2023 - 16:02:39 EST
From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
stop_machine() does not prevent the spin-waiting sibling from handling an
NMI, which obviously violates the whole concept of a rendezvous.
Implement a static branch right at the beginning of the NMI handler which
is NOOPed except when enabled by the late loading mechanism.
The late loader enables the static branch before stop_machine() is
invoked. Each CPU has an nmi_enabled flag in its control structure which
indicates whether the CPU should go into the update routine.
This is required to bridge the gap between enabling the static branch and
actually being at the point where invoking the update routine makes sense.
Each CPU which arrives in the stopper thread function sets that flag and
issues a self NMI right after that. If the NMI function sees the flag
clear, it returns. If it's set, it clears the flag and enters the rendezvous.
This is safe against a real NMI which hits in between setting the flag and
sending the NMI to itself. The real NMI will be swallowed by the microcode
update and the self NMI will then let stuff continue. Otherwise this would
end up with a spurious NMI.
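For reference, the pieces spread over the hunks below combine into the
following control flow (condensed sketch only, not an additional change;
the identifiers are the ones this patch introduces, error and result
handling omitted):

  /* nmi.c: jump-label gate at the top of default_do_nmi() */
  if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
          goto out;

  /* core.c: late loader, around stop_machine() */
  if (microcode_ops->use_nmi)
          static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);
  stop_machine_cpuslocked(ucode_load_cpus_stopped, NULL, cpu_online_mask);
  if (microcode_ops->use_nmi)
          static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);

  /* core.c: stopper thread function, runs on every online CPU */
  static int ucode_load_cpus_stopped(void *unused)
  {
          if (microcode_ops->use_nmi) {
                  /* Arm the per CPU flag, then send an NMI to itself */
                  this_cpu_write(ucode_ctrl.nmi_enabled, true);
                  apic->send_IPI(smp_processor_id(), NMI_VECTOR);
          } else {
                  /* No NMI rendezvous, invoke the handler directly */
                  microcode_update_handler();
          }
          return 0;
  }

  /* core.c: invoked from the gate in default_do_nmi() */
  bool microcode_nmi_handler(void)
  {
          /* Flag clear: not a microcode NMI, fall through to nmi_handle() */
          if (!this_cpu_read(ucode_ctrl.nmi_enabled))
                  return false;

          /*
           * Flag set: clear it and enter the rendezvous. A real NMI which
           * hits between setting the flag and the self IPI takes this path
           * and is swallowed by the microcode update; the self NMI then
           * lets things continue instead of ending up as a spurious NMI.
           */
          this_cpu_write(ucode_ctrl.nmi_enabled, false);
          return microcode_update_handler();
  }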
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/include/asm/microcode.h | 12 ++++++++
arch/x86/kernel/cpu/microcode/core.c | 42 ++++++++++++++++++++++++++++---
arch/x86/kernel/cpu/microcode/intel.c | 1 +
arch/x86/kernel/cpu/microcode/internal.h | 3 +-
arch/x86/kernel/nmi.c | 4 ++
5 files changed, 57 insertions(+), 5 deletions(-)
---
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -75,4 +75,16 @@ void show_ucode_info_early(void);
static inline void show_ucode_info_early(void) { }
#endif /* !CONFIG_CPU_SUP_INTEL */
+bool microcode_nmi_handler(void);
+
+#ifdef CONFIG_MICROCODE_LATE_LOADING
+DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
+static __always_inline bool microcode_nmi_handler_enabled(void)
+{
+ return static_branch_unlikely(&microcode_nmi_handler_enable);
+}
+#else
+static __always_inline bool microcode_nmi_handler_enabled(void) { return false; }
+#endif
+
#endif /* _ASM_X86_MICROCODE_H */
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -23,6 +23,7 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -31,6 +32,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/perf_event.h>
#include <asm/processor.h>
@@ -334,8 +336,10 @@ struct ucode_ctrl {
enum sibling_ctrl ctrl;
enum ucode_state result;
unsigned int ctrl_cpu;
+ bool nmi_enabled;
};
+DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable);
static DEFINE_PER_CPU(struct ucode_ctrl, ucode_ctrl);
static atomic_t late_cpus_in;
@@ -349,7 +353,8 @@ static bool wait_for_cpus(atomic_t *cnt)
if (!atomic_read(cnt))
return true;
udelay(1);
- if (!(timeout % 1000))
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % 1000))
touch_nmi_watchdog();
}
/* Prevent the late comers to make progress and let them time out */
@@ -365,7 +370,8 @@ static bool wait_for_ctrl(void)
if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT)
return true;
udelay(1);
- if (!(timeout % 1000))
+ /* If invoked directly, tickle the NMI watchdog */
+ if (!microcode_ops->use_nmi && !(timeout % 1000))
touch_nmi_watchdog();
}
return false;
@@ -443,7 +449,7 @@ static void ucode_load_primary(unsigned
}
}
-static int ucode_load_cpus_stopped(void *unused)
+static bool microcode_update_handler(void)
{
unsigned int cpu = smp_processor_id();
@@ -452,7 +458,29 @@ static int ucode_load_cpus_stopped(void
else
ucode_load_secondary(cpu);
- /* No point to wait here. The CPUs will all wait in stop_machine(). */
+ touch_nmi_watchdog();
+ return true;
+}
+
+bool microcode_nmi_handler(void)
+{
+ if (!this_cpu_read(ucode_ctrl.nmi_enabled))
+ return false;
+
+ this_cpu_write(ucode_ctrl.nmi_enabled, false);
+ return microcode_update_handler();
+}
+
+static int ucode_load_cpus_stopped(void *unused)
+{
+ if (microcode_ops->use_nmi) {
+ /* Enable the NMI handler and raise NMI */
+ this_cpu_write(ucode_ctrl.nmi_enabled, true);
+ apic->send_IPI(smp_processor_id(), NMI_VECTOR);
+ } else {
+ /* Just invoke the handler directly */
+ microcode_update_handler();
+ }
return 0;
}
@@ -473,8 +501,14 @@ static int ucode_load_late_stop_cpus(voi
*/
store_cpu_caps(&prev_info);
+ if (microcode_ops->use_nmi)
+ static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);
+
stop_machine_cpuslocked(ucode_load_cpus_stopped, NULL, cpu_online_mask);
+ if (microcode_ops->use_nmi)
+ static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);
+
/* Analyze the results */
for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
switch (per_cpu(ucode_ctrl.result, cpu)) {
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -688,6 +688,7 @@ static struct microcode_ops microcode_in
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode_late,
.finalize_late_load = finalize_late_load,
+ .use_nmi = IS_ENABLED(CONFIG_X86_64),
};
static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -31,7 +31,8 @@ struct microcode_ops {
enum ucode_state (*apply_microcode)(int cpu);
int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
void (*finalize_late_load)(int result);
- unsigned int nmi_safe : 1;
+ unsigned int nmi_safe : 1,
+ use_nmi : 1;
};
extern struct ucode_cpu_info ucode_cpu_info[];
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
+#include <asm/microcode.h>
#include <asm/sev.h>
#define CREATE_TRACE_POINTS
@@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struc
instrumentation_begin();
+ if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
+ goto out;
+
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {