[PATCH v2 09/11] x86/microcode: Use stop-machine NMI facility

From: Chang S. Bae

Date: Mon Mar 30 2026 - 22:17:35 EST


The existing NMI-based loading logic explicitly sends an NMI to each online
CPU and invokes microcode_update_handler() from the NMI context. The
stop-machine NMI variant already provides this mechanism on x86, which makes
the custom NMI control logic redundant.

Replace the custom NMI control logic with stop_machine_nmi_cpuslocked().

Signed-off-by: Chang S. Bae <chang.seok.bae@xxxxxxxxx>
---
V1 -> V2: Select the STOP_MACHINE_NMI build option
---
arch/x86/Kconfig | 1 +
arch/x86/include/asm/microcode.h | 1 -
arch/x86/kernel/cpu/microcode/core.c | 19 +++----------------
arch/x86/kernel/nmi.c | 3 ---
4 files changed, 4 insertions(+), 20 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0b5f30d769ff..0f7e88ba7433 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1338,6 +1338,7 @@ config MICROCODE_LATE_LOADING
bool "Late microcode loading (DANGEROUS)"
default n
depends on MICROCODE && SMP
+ select STOP_MACHINE_NMI
help
Loading microcode late, when the system is up and executing instructions
is a tricky business and should be avoided if possible. Just the sequence
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 3c317d155771..62d10c43da9c 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -79,7 +79,6 @@ static inline u32 intel_get_microcode_revision(void)
}
#endif /* !CONFIG_CPU_SUP_INTEL */

-bool microcode_nmi_handler(void);
void microcode_offline_nmi_handler(void);

#ifdef CONFIG_MICROCODE_LATE_LOADING
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index abd640b1d286..ebcc73e67af1 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -563,22 +563,9 @@ static noinstr int microcode_update_handler(void *unused)
* path which must be NMI safe until the primary thread completed the
* update.
*/
-bool noinstr microcode_nmi_handler(void)
+static noinstr int microcode_nmi_handler(void *data)
{
- if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
- return false;
-
- raw_cpu_write(ucode_ctrl.nmi_enabled, false);
- return microcode_update_handler(NULL) == 0;
-}
-
-static int stop_cpu_in_nmi(void *unused)
-{
- /* Enable the NMI handler and raise NMI */
- this_cpu_write(ucode_ctrl.nmi_enabled, true);
- apic->send_IPI(smp_processor_id(), NMI_VECTOR);
-
- return 0;
+ return microcode_update_handler(data);
}

static int load_late_stop_cpus(bool is_safe)
@@ -616,7 +603,7 @@ static int load_late_stop_cpus(bool is_safe)

if (microcode_ops->use_nmi) {
static_branch_enable_cpuslocked(&microcode_nmi_handler_enable);
- stop_machine_cpuslocked(stop_cpu_in_nmi, NULL, cpu_online_mask);
+ stop_machine_nmi_cpuslocked(microcode_nmi_handler, NULL, cpu_online_mask);
static_branch_disable_cpuslocked(&microcode_nmi_handler_enable);
} else {
stop_machine_cpuslocked(microcode_update_handler, NULL, cpu_online_mask);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index b7ea2907142c..324f4353be88 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -386,9 +386,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs)
if (stop_machine_nmi_handler())
goto out;

- if (microcode_nmi_handler_enabled() && microcode_nmi_handler())
- goto out;
-
/*
* CPU-specific NMI must be processed before non-CPU-specific
* NMI, otherwise we may lose it, because the CPU-specific
--
2.51.0