[PATCH v3] workqueue: add cmdline parameter `workqueue_unbound_cpus` to further constrain wq_unbound_cpumask at boot time
From: tiozhang
Date: Sat Jun 24 2023 - 23:16:01 EST
The motivation for this is to improve boot times on devices where we want
to prevent unbound workqueue work items from running on specific CPUs,
e.g. CPUs that are busy handling interrupts.
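
For example, on a hypothetical 8-CPU system where CPUs 0-3 handle most of
the interrupt load, booting with:

    workqueue_unbound_cpus=4-7

keeps unbound workqueue work items on CPUs 4-7. The mask given on the
command line can only further restrict wq_unbound_cpumask; it is still
ANDed with the housekeeping cpumasks in workqueue_init_early().
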
Signed-off-by: tiozhang <tiozhang@xxxxxxxxxxxxxx>
---
.../admin-guide/kernel-parameters.txt | 8 +++++++
kernel/workqueue.c | 24 +++++++++++++++++++
2 files changed, 32 insertions(+)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a465d5242774..7f2fe8c60d5c 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -6780,6 +6780,14 @@
disables both lockup detectors. Default is 10
seconds.
+ workqueue_unbound_cpus=
+ [KNL,SMP]
+ Format: <cpu-list>
+ Restrict the CPUs that unbound workqueues are
+ allowed to use.
+ By default, all online CPUs are available for
+ unbound workqueues.
+
workqueue.watchdog_thresh=
If CONFIG_WQ_WATCHDOG is configured, workqueue can
warn stall conditions and dump internal state to
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7cd5f5e7e0a1..c247725b0873 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -329,6 +329,9 @@ static bool workqueue_freezing; /* PL: have wqs started freezing? */
/* PL: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;
+/* for further constraining wq_unbound_cpumask by the cmdline parameter */
+static cpumask_var_t wq_cmdline_cpumask;
+
/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);
@@ -6006,6 +6009,10 @@ void __init workqueue_init_early(void)
cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
+ if (cpumask_available(wq_cmdline_cpumask) && !cpumask_empty(wq_cmdline_cpumask))
+ cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, wq_cmdline_cpumask);
+ free_bootmem_cpumask_var(wq_cmdline_cpumask);
+
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
/* initialize CPU pools */
@@ -6129,3 +6136,20 @@ void __init workqueue_init(void)
*/
void __warn_flushing_systemwide_wq(void) { }
EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
+
+static int __init workqueue_unbound_cpus_setup(char *str)
+{
+ cpumask_var_t cpumask;
+
+ alloc_bootmem_cpumask_var(&wq_cmdline_cpumask);
+ alloc_bootmem_cpumask_var(&cpumask);
+ if (cpulist_parse(str, cpumask) < 0)
+ pr_warn("workqueue_unbound_cpus: incorrect CPU range\n");
+ else
+ cpumask_copy(wq_cmdline_cpumask, cpumask);
+
+ free_bootmem_cpumask_var(cpumask);
+
+ return 1;
+}
+__setup("workqueue_unbound_cpus=", workqueue_unbound_cpus_setup);
--
2.17.1