[PATCH RFC 2/5] workqueue: add WQ_AFFN_CACHE_SHARD affinity scope
From: Breno Leitao
Date: Thu Mar 12 2026 - 12:23:24 EST

On systems where many CPUs share one LLC, unbound workqueues using
WQ_AFFN_CACHE collapse to a single worker pool, causing heavy spinlock
contention on pool->lock. For example, Chuck Lever measured 39% of
cycles lost to native_queued_spin_lock_slowpath on a 12-core shared-L3
NFS-over-RDMA system.

The existing affinity hierarchy (cpu, smt, cache, numa, system) offers
no intermediate option between per-LLC and per-SMT-core granularity.

Add WQ_AFFN_CACHE_SHARD, which subdivides each LLC into groups of at
most wq_cache_shard_size CPUs (default 8, tunable via boot parameter).
CPUs are distributed across shards as evenly as possible: 72 CPUs with a
max shard size of 8 split into 9 shards of 8 each, while 20 CPUs split
into 3 shards of 7+7+6.
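
For reference, the split rule is plain ceil-division with the remainder
spread one CPU at a time over the leading shards. A standalone userspace
C sketch (illustration only, not part of the patch):

  /* Print the shard sizes produced by the even-split rule:
   * nr_shards = ceil(nr_cpus / max_shard), and the first
   * (nr_cpus % nr_shards) shards each take one extra CPU.
   */
  #include <stdio.h>

  static void show_split(int nr_cpus, int max_shard)
  {
          int nr_shards = (nr_cpus + max_shard - 1) / max_shard;
          int base = nr_cpus / nr_shards;   /* minimum CPUs per shard */
          int extra = nr_cpus % nr_shards;  /* shards with one extra CPU */

          printf("%2d CPUs, max %d ->", nr_cpus, max_shard);
          for (int i = 0; i < nr_shards; i++)
                  printf(" %d", base + (i < extra));
          printf("\n");
  }

  int main(void)
  {
          show_split(72, 8);      /* 8 8 8 8 8 8 8 8 8 */
          show_split(20, 8);      /* 7 7 6 */
          return 0;
  }

Since the knob is a module_param in built-in kernel/workqueue.c, the
shard size ceiling is set at boot with workqueue.cache_shard_size=<n>.
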
The implementation follows the same comparator pattern as other affinity
scopes: cpu_cache_shard_id() computes a per-CPU shard index on the fly
from the already-initialized WQ_AFFN_CACHE topology, and
cpus_share_cache_shard() is passed to init_pod_type().
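
Selection needs no new plumbing: an unbound workqueue can opt in through
the existing attrs interface. A minimal in-kernel sketch (assuming an
already-created unbound workqueue @wq; error handling elided):

  struct workqueue_attrs *attrs;

  attrs = alloc_workqueue_attrs();
  if (attrs) {
          attrs->affn_scope = WQ_AFFN_CACHE_SHARD;
          apply_workqueue_attrs(wq, attrs);
          free_workqueue_attrs(attrs);
  }

With the new wq_affn_names entry, the scope should also be selectable by
name ("cache_shard") through the per-workqueue affinity_scope sysfs file
and the workqueue.default_affinity_scope boot parameter.
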
Benchmark on NVIDIA Grace (72 CPUs, single LLC, 50k items/thread):

  cpu          3433158 items/sec  p50=16416  p90=17376  p95=17664 ns
  smt          3449709 items/sec  p50=16576  p90=17504  p95=17792 ns
  cache_shard  2939917 items/sec  p50= 8192  p90=11488  p95=12512 ns
  cache         602096 items/sec  p50=53056  p90=56320  p95=57248 ns
  numa          599090 items/sec  p50=53152  p90=56448  p95=57376 ns
  system        598865 items/sec  p50=53184  p90=56481  p95=57408 ns

cache_shard delivers ~5x the throughput and ~6.5x lower p50 latency
compared to cache scope on this 72-core single-LLC system.

Suggested-by: Tejun Heo <tj@xxxxxxxxxx>
Signed-off-by: Breno Leitao <leitao@xxxxxxxxxx>
---
include/linux/workqueue.h | 1 +
kernel/workqueue.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 61 insertions(+)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a4749f56398fd..41c946109c7d0 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -133,6 +133,7 @@ enum wq_affn_scope {
 	WQ_AFFN_CPU,			/* one pod per CPU */
 	WQ_AFFN_SMT,			/* one pod per SMT */
 	WQ_AFFN_CACHE,			/* one pod per LLC */
+	WQ_AFFN_CACHE_SHARD,		/* synthetic sub-LLC shards */
 	WQ_AFFN_NUMA,			/* one pod per NUMA node */
 	WQ_AFFN_SYSTEM,			/* one pod across the whole system */
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 028afc3d14e59..6be884eb3450d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -409,6 +409,7 @@ static const char * const wq_affn_names[WQ_AFFN_NR_TYPES] = {
 	[WQ_AFFN_CPU]		= "cpu",
 	[WQ_AFFN_SMT]		= "smt",
 	[WQ_AFFN_CACHE]		= "cache",
+	[WQ_AFFN_CACHE_SHARD]	= "cache_shard",
 	[WQ_AFFN_NUMA]		= "numa",
 	[WQ_AFFN_SYSTEM]	= "system",
 };
@@ -431,6 +432,9 @@ module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static unsigned int wq_cache_shard_size = 8;
+module_param_named(cache_shard_size, wq_cache_shard_size, uint, 0444);
+
 static bool wq_online;			/* can kworkers be created yet? */
 
 static bool wq_topo_initialized __read_mostly = false;
@@ -8106,6 +8110,56 @@ static bool __init cpus_share_numa(int cpu0, int cpu1)
 	return cpu_to_node(cpu0) == cpu_to_node(cpu1);
 }
 
+/**
+ * cpu_cache_shard_id - compute the shard index for a CPU within its LLC pod
+ * @cpu: the CPU to look up
+ *
+ * Returns a shard index that is unique within the CPU's LLC pod. CPUs in
+ * the same LLC are divided into shards no larger than wq_cache_shard_size,
+ * distributed as evenly as possible. E.g., 20 CPUs with max shard size 8
+ * gives 3 shards of 7+7+6.
+ */
+static int __init cpu_cache_shard_id(int cpu)
+{
+	struct wq_pod_type *cache_pt = &wq_pod_types[WQ_AFFN_CACHE];
+	const struct cpumask *pod_cpus;
+	int nr_cpus, nr_shards, shard_size, remainder, c;
+	int pos = 0;
+
+	/* CPUs in the same LLC as @cpu */
+	pod_cpus = cache_pt->pod_cpus[cache_pt->cpu_pod[cpu]];
+	/* Total number of CPUs sharing this LLC */
+	nr_cpus = cpumask_weight(pod_cpus);
+	/* Number of shards to split this LLC into */
+	nr_shards = DIV_ROUND_UP(nr_cpus, wq_cache_shard_size);
+	/* Minimum number of CPUs per shard */
+	shard_size = nr_cpus / nr_shards;
+	/* First @remainder shards get one extra CPU */
+	remainder = nr_cpus % nr_shards;
+
+	/* Find position of @cpu within its cache pod */
+	for_each_cpu(c, pod_cpus) {
+		if (c == cpu)
+			break;
+		pos++;
+	}
+
+	/*
+	 * Map position to shard index. The first @remainder shards have
+	 * (shard_size + 1) CPUs, the rest have @shard_size CPUs.
+	 */
+	if (pos < remainder * (shard_size + 1))
+		return pos / (shard_size + 1);
+	return remainder + (pos - remainder * (shard_size + 1)) / shard_size;
+}
+
+static bool __init cpus_share_cache_shard(int cpu0, int cpu1)
+{
+	if (!cpus_share_cache(cpu0, cpu1))
+		return false;
+	return cpu_cache_shard_id(cpu0) == cpu_cache_shard_id(cpu1);
+}
+
 /**
  * workqueue_init_topology - initialize CPU pods for unbound workqueues
  *
@@ -8118,9 +8172,15 @@ void __init workqueue_init_topology(void)
 	struct workqueue_struct *wq;
 	int cpu;
 
+	if (!wq_cache_shard_size) {
+		pr_warn("workqueue: cache_shard_size must be > 0, setting to 1\n");
+		wq_cache_shard_size = 1;
+	}
+
 	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
 	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
+	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE_SHARD], cpus_share_cache_shard);
 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
 	wq_topo_initialized = true;
--
2.52.0