[PATCH v4] workqueue: add cmdline parameter `workqueue.unbound_cpus` to further constrain wq_unbound_cpumask at boot time
From: tiozhang
Date: Wed Jun 28 2023 - 07:19:34 EST
The motivation for this is to improve boot times on devices where we want
to prevent unbound workqueue work items from running on specific CPUs,
e.g. CPUs that are busy handling interrupts.
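For example, on an 8-CPU system, booting with

    workqueue.unbound_cpus=2-7

(any <cpu-list> accepted by cpulist_parse() works) keeps unbound work
items off CPUs 0 and 1. The list is only ANDed into wq_unbound_cpumask,
so it can shrink the mask but never add back CPUs already excluded by
the housekeeping setup.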
Signed-off-by: tiozhang <tiozhang@xxxxxxxxxxxxxx>
---
.../admin-guide/kernel-parameters.txt | 7 +++++++
kernel/workqueue.c | 20 +++++++++++++++++++
2 files changed, 27 insertions(+)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a465d5242774..a88b133ab09b 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -6780,6 +6780,13 @@
disables both lockup detectors. Default is 10
seconds.
+ workqueue.unbound_cpus=
+ [KNL,SMP] Restrict the set of CPUs that unbound
+ workqueues are allowed to run on.
+ Format: <cpu-list>
+ By default, all online CPUs are available for
+ unbound workqueues.
+
workqueue.watchdog_thresh=
If CONFIG_WQ_WATCHDOG is configured, workqueue can
warn stall conditions and dump internal state to
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7cd5f5e7e0a1..29e8254edd63 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -329,6 +329,9 @@ static bool workqueue_freezing; /* PL: have wqs started freezing? */
/* PL: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;
+/* used to further constrain wq_unbound_cpumask via a cmdline parameter */
+static struct cpumask wq_cmdline_cpumask __initdata;
+
/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);
@@ -6006,6 +6009,9 @@ void __init workqueue_init_early(void)
cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
+ if (!cpumask_empty(&wq_cmdline_cpumask))
+ cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
+
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
/* initialize CPU pools */
@@ -6129,3 +6135,17 @@ void __init workqueue_init(void)
*/
void __warn_flushing_systemwide_wq(void) { }
EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
+
+static int __init workqueue_unbound_cpus_setup(char *str)
+{
+ int ret;
+
+ ret = cpulist_parse(str, &wq_cmdline_cpumask);
+ if (ret < 0) {
+ cpumask_clear(&wq_cmdline_cpumask);
+ pr_warn("workqueue.unbound_cpus: incorrect CPU range, ignoring\n");
+ }
+
+ return 1; /* __setup() handlers return 1 once the option has been handled */
+}
+__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
--
2.17.1
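As a quick sanity check after boot, the resulting wq_unbound_cpumask is
already exported through sysfs, so (assuming a valid cpu-list was passed)
something like

    $ cat /sys/devices/virtual/workqueue/cpumask

should print a hex bitmask covering only the CPUs left after the
intersection above.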