[tip: x86/microcode] x86/microcode: Replace the all-in-one rendezvous handler
From: tip-bot2 for Thomas Gleixner
Date: Tue Oct 24 2023 - 09:21:52 EST
The following commit has been merged into the x86/microcode branch of tip:
Commit-ID: 0bf871651211b58c7b19f40b746b646d5311e2ec
Gitweb: https://git.kernel.org/tip/0bf871651211b58c7b19f40b746b646d5311e2ec
Author: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate: Mon, 02 Oct 2023 14:00:03 +02:00
Committer: Borislav Petkov (AMD) <bp@xxxxxxxxx>
CommitterDate: Tue, 24 Oct 2023 15:05:55 +02:00
x86/microcode: Replace the all-in-one rendezvous handler
with a new handler which just separates the control flow of primary and
secondary CPUs.
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Link: https://lore.kernel.org/r/20231002115903.433704135@xxxxxxxxxxxxx
---
arch/x86/kernel/cpu/microcode/core.c | 51 ++++-----------------------
1 file changed, 9 insertions(+), 42 deletions(-)
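
For quick reading, the net effect of the last hunk is that the stop_machine() callback collapses to a plain dispatch on the per-CPU control CPU. A condensed view of the resulting function, taken from the diff below (kernel context only, not buildable standalone):

static int load_cpus_stopped(void *unused)
{
	unsigned int cpu = smp_processor_id();

	/* The designated control CPU of this core runs the primary path. */
	if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu)
		load_primary(cpu);
	else
		load_secondary(cpu);

	/* No point to wait here. The CPUs will all wait in stop_machine(). */
	return 0;
}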
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 1ff38f9..1c2710b 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -268,7 +268,7 @@ struct microcode_ctrl {
};
static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl);
-static atomic_t late_cpus_in, late_cpus_out;
+static atomic_t late_cpus_in;
static bool wait_for_cpus(atomic_t *cnt)
{
@@ -304,7 +304,7 @@ static bool wait_for_ctrl(void)
return false;
}
-static __maybe_unused void load_secondary(unsigned int cpu)
+static void load_secondary(unsigned int cpu)
{
unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu);
enum ucode_state ret;
@@ -339,7 +339,7 @@ static __maybe_unused void load_secondary(unsigned int cpu)
this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
}
-static __maybe_unused void load_primary(unsigned int cpu)
+static void load_primary(unsigned int cpu)
{
struct cpumask *secondaries = topology_sibling_cpumask(cpu);
enum sibling_ctrl ctrl;
@@ -376,46 +376,14 @@ static __maybe_unused void load_primary(unsigned int cpu)
static int load_cpus_stopped(void *unused)
{
- int cpu = smp_processor_id();
- enum ucode_state ret;
-
- /*
- * Wait for all CPUs to arrive. A load will not be attempted unless all
- * CPUs show up.
- * */
- if (!wait_for_cpus(&late_cpus_in)) {
- this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
- return 0;
- }
-
- /*
- * On an SMT system, it suffices to load the microcode on one sibling of
- * the core because the microcode engine is shared between the threads.
- * Synchronization still needs to take place so that no concurrent
- * loading attempts happen on multiple threads of an SMT core. See
- * below.
- */
- if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
- goto wait_for_siblings;
+ unsigned int cpu = smp_processor_id();
- ret = microcode_ops->apply_microcode(cpu);
- this_cpu_write(ucode_ctrl.result, ret);
-
-wait_for_siblings:
- if (!wait_for_cpus(&late_cpus_out))
- panic("Timeout during microcode update!\n");
-
- /*
- * At least one thread has completed update on each core.
- * For others, simply call the update to make sure the
- * per-cpu cpuinfo can be updated with right microcode
- * revision.
- */
- if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
- return 0;
+ if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu)
+ load_primary(cpu);
+ else
+ load_secondary(cpu);
- ret = microcode_ops->apply_microcode(cpu);
- this_cpu_write(ucode_ctrl.result, ret);
+ /* No point to wait here. The CPUs will all wait in stop_machine(). */
return 0;
}
@@ -429,7 +397,6 @@ static int load_late_stop_cpus(void)
pr_err("You should switch to early loading, if possible.\n");
atomic_set(&late_cpus_in, num_online_cpus());
- atomic_set(&late_cpus_out, num_online_cpus());
/*
* Take a snapshot before the microcode update in order to compare and