[PATCH] RISC-V: Enable HOTPLUG_PARALLEL for secondary CPUs

From: Anup Patel
Date: Fri Sep 05 2025 - 08:25:40 EST


The core kernel already supports parallel bringup of secondary
CPUs (aka HOTPLUG_PARALLEL). The x86 and MIPS architectures
already use it, and ARM is also moving toward it.

On RISC-V, no arch-specific global data is accessed in the
secondary CPU bringup path, so enabling HOTPLUG_PARALLEL for
RISC-V only requires:
1) Providing RISC-V specific arch_cpuhp_kick_ap_alive()
2) Calling cpuhp_ap_sync_alive() from smp_callin()

This patch was tested natively with OpenSBI on a QEMU RV64 virt
machine with 64 cores, and with a KVM RISC-V guest with 32 VCPUs.

Signed-off-by: Anup Patel <apatel@xxxxxxxxxxxxxxxx>
---
arch/riscv/Kconfig | 2 +-
arch/riscv/kernel/smpboot.c | 15 +++++++++++++++
2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a4b233a0659e..d5800d6f9a15 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -196,7 +196,7 @@ config RISCV
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
- select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+ select HOTPLUG_PARALLEL if HOTPLUG_CPU
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select KASAN_VMALLOC if KASAN
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 601a321e0f17..d85916a3660c 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -39,7 +39,9 @@

#include "head.h"

+#ifndef CONFIG_HOTPLUG_PARALLEL
static DECLARE_COMPLETION(cpu_running);
+#endif

void __init smp_prepare_cpus(unsigned int max_cpus)
{
@@ -179,6 +181,12 @@ static int start_secondary_cpu(int cpu, struct task_struct *tidle)
return -EOPNOTSUPP;
}

+#ifdef CONFIG_HOTPLUG_PARALLEL
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
+{
+ return start_secondary_cpu(cpu, tidle);
+}
+#else
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = 0;
@@ -199,6 +207,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)

return ret;
}
+#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
@@ -225,6 +234,10 @@ asmlinkage __visible void smp_callin(void)
mmgrab(mm);
current->active_mm = mm;

+#ifdef CONFIG_HOTPLUG_PARALLEL
+ cpuhp_ap_sync_alive();
+#endif
+
store_cpu_topology(curr_cpuid);
notify_cpu_starting(curr_cpuid);

@@ -243,7 +256,9 @@ asmlinkage __visible void smp_callin(void)
*/
local_flush_icache_all();
local_flush_tlb_all();
+#ifndef CONFIG_HOTPLUG_PARALLEL
complete(&cpu_running);
+#endif
/*
* Disable preemption before enabling interrupts, so we don't try to
* schedule a CPU that hasn't actually started yet.
--
2.43.0