[PATCH] smp: add missing kernel-doc comments

From: Randy Dunlap

Date: Thu Mar 05 2026 - 01:36:22 EST


Add missing kernel-doc comments and rearrange the order of others to
prevent all kernel-doc warnings.

- add function Returns: sections or format existing comments as kernel-doc
- add missing function parameter comments
- use "/**" for smp_call_function_any() and on_each_cpu_cond_mask()
- correct the commented function name for on_each_cpu_cond_mask()
- use correct format for function short descriptions
- add all kernel-doc comments for smp_call_on_cpu()
- remove kernel-doc comments for raw_smp_processor_id() since there is
no prototype for it here (other than !SMP)
- in smp.h, rearrange some lines so that the kernel-doc comments for
smp_processor_id() are immediately before the macro (to prevent
kernel-doc warnings)

Signed-off-by: Randy Dunlap <rdunlap@xxxxxxxxxxxxx>
---
Cc: Thomas Gleixner <tglx@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>

include/linux/smp.h | 38 +++++++++++++++++++++-----------------
kernel/smp.c | 34 ++++++++++++++++++++++++----------
2 files changed, 45 insertions(+), 27 deletions(-)

--- linux-next-20260304.orig/kernel/smp.c
+++ linux-next-20260304/kernel/smp.c
@@ -215,7 +215,7 @@ static atomic_t n_csd_lock_stuck;
/**
* csd_lock_is_stuck - Has a CSD-lock acquisition been stuck too long?
*
- * Returns @true if a CSD-lock acquisition is stuck and has been stuck
+ * Returns: %true if a CSD-lock acquisition is stuck and has been stuck
* long enough for a "non-responsive CSD lock" message to be printed.
*/
bool csd_lock_is_stuck(void)
@@ -625,13 +625,14 @@ void flush_smp_call_function_queue(void)
local_irq_restore(flags);
}

-/*
+/**
* smp_call_function_single - Run a function on a specific CPU
+ * @cpu: Specific target CPU for this function.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait until function has completed on other CPUs.
*
- * Returns 0 on success, else a negative status code.
+ * Returns: %0 on success, else a negative status code.
*/
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
int wait)
@@ -738,14 +739,14 @@ out:
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

-/*
+/**
* smp_call_function_any - Run a function on any of the given cpus
* @mask: The mask of cpus it can run on.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait until function has completed.
*
- * Returns 0 on success, else a negative status code (if no cpus were online).
+ * Returns: %0 on success, else a negative status code (if no cpus were online).
*
* Selection preference:
* 1) current cpu if in @mask
@@ -880,7 +881,7 @@ static void smp_call_function_many_cond(
}

/**
- * smp_call_function_many(): Run a function on a set of CPUs.
+ * smp_call_function_many() - Run a function on a set of CPUs.
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
@@ -902,7 +903,7 @@ void smp_call_function_many(const struct
EXPORT_SYMBOL(smp_call_function_many);

/**
- * smp_call_function(): Run a function on all other CPUs.
+ * smp_call_function() - Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed
@@ -1009,8 +1010,8 @@ void __init smp_init(void)
smp_cpus_done(setup_max_cpus);
}

-/*
- * on_each_cpu_cond(): Call a function on each processor for which
+/**
+ * on_each_cpu_cond_mask() - Call a function on each processor for which
* the supplied function cond_func returns true, optionally waiting
* for all the required CPUs to finish. This may include the local
* processor.
@@ -1024,6 +1025,7 @@ void __init smp_init(void)
* @info: An arbitrary pointer to pass to both functions.
* @wait: If true, wait (atomically) until function has
* completed on other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
*
* Preemption is disabled to protect against CPUs going offline but not online.
* CPUs going online during the call will not be seen or sent an IPI.
@@ -1095,7 +1097,7 @@ EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus)
* scheduled, for any of the CPUs in the @mask. It does not guarantee
* correctness as it only provides a racy snapshot.
*
- * Returns true if there is a pending IPI scheduled and false otherwise.
+ * Returns: %true if there is a pending IPI scheduled and %false otherwise.
*/
bool cpus_peek_for_pending_ipi(const struct cpumask *mask)
{
@@ -1145,6 +1147,18 @@ static void smp_call_on_cpu_callback(str
complete(&sscs->done);
}

+/**
+ * smp_call_on_cpu() - Call a function on a specific CPU and wait
+ * for it to return.
+ * @cpu: The CPU to run on.
+ * @func: The function to run
+ * @par: An arbitrary pointer parameter for @func.
+ * @phys: If %true, force to run on physical @cpu. See
+ * &struct smp_call_on_cpu_struct for more info.
+ *
+ * Returns: %-ENXIO if the @cpu is invalid; otherwise the return value
+ * from @func.
+ */
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
struct smp_call_on_cpu_struct sscs = {
--- linux-next-20260304.orig/include/linux/smp.h
+++ linux-next-20260304/include/linux/smp.h
@@ -73,7 +73,7 @@ static inline void on_each_cpu(smp_call_
}

/**
- * on_each_cpu_mask(): Run a function on processors specified by
+ * on_each_cpu_mask() - Run a function on processors specified by
* cpumask, which may include the local processor.
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
@@ -239,13 +239,30 @@ static inline int get_boot_cpu_id(void)

#endif /* !SMP */

-/**
+/*
* raw_smp_processor_id() - get the current (unstable) CPU id
*
- * For then you know what you are doing and need an unstable
+ * raw_smp_processor_id() is arch-specific/arch-defined and
+ * may be a macro or a static inline function.
+ *
+ * For when you know what you are doing and need an unstable
* CPU id.
*/

+/*
+ * Allow the architecture to differentiate between a stable and unstable read.
+ * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
+ * regular asm read for the stable.
+ */
+#ifndef __smp_processor_id
+#define __smp_processor_id() raw_smp_processor_id()
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+ extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
+
+#else
/**
* smp_processor_id() - get the current (stable) CPU id
*
@@ -258,23 +275,10 @@ static inline int get_boot_cpu_id(void)
* - preemption is disabled;
* - the task is CPU affine.
*
- * When CONFIG_DEBUG_PREEMPT; we verify these assumption and WARN
+ * When CONFIG_DEBUG_PREEMPT=y, we verify these assumptions and WARN
* when smp_processor_id() is used when the CPU id is not stable.
*/

-/*
- * Allow the architecture to differentiate between a stable and unstable read.
- * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
- * regular asm read for the stable.
- */
-#ifndef __smp_processor_id
-#define __smp_processor_id() raw_smp_processor_id()
-#endif
-
-#ifdef CONFIG_DEBUG_PREEMPT
- extern unsigned int debug_smp_processor_id(void);
-# define smp_processor_id() debug_smp_processor_id()
-#else
# define smp_processor_id() __smp_processor_id()
#endif