[Patch v4 08/22] sched/cache: Introduce per CPU's tasks LLC preference counter
From: Tim Chen
Date: Wed Apr 01 2026 - 17:47:40 EST
The lowest level of sched domain for each CPU is assigned an
array where each element tracks the number of tasks preferring
a given LLC, indexed from 0 to max_lid. Since each CPU
has its dedicated sd, this implies that each CPU will have
a dedicated task LLC preference counter.
For example, sd->llc_counts[3] = 2 signifies that there
are 2 tasks on this runqueue which prefer to run within LLC3.
The load balancer can use this information to identify busy
runqueues and migrate tasks to their preferred LLC domains.
This array will be reallocated at runtime during sched domain
rebuild.
Introduce the buffer allocation mechanism, and the statistics
will be calculated in the subsequent patch.
Note: the LLC preference statistics of each CPU are reset on
sched domain rebuild and may undercount temporarily, until the
CPU becomes idle and the count is cleared. This is a trade-off
to avoid complex data synchronization across sched domain builds.
Suggested-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Suggested-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
Co-developed-by: Chen Yu <yu.c.chen@xxxxxxxxx>
Signed-off-by: Chen Yu <yu.c.chen@xxxxxxxxx>
Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
Notes:
v3->v4:
Rename pf to llc_counts to better reflect its usage;
Record its size (llc_max) per sched domain;
Publish the llc_counts and its size together in
cpu_attach_domain().
(Peter Zijlstra)
include/linux/sched/topology.h | 13 ++++++++
kernel/sched/topology.c | 61 +++++++++++++++++++++++++++++++++-
2 files changed, 73 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index a4e2fb31f2fd..73153a3d9036 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -102,6 +102,19 @@ struct sched_domain {
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
+#ifdef CONFIG_SCHED_CACHE
+ unsigned int llc_max;
+ /*
+ * per LLC preference counter
+ * __counted_by cannot be used here because
+ * when the percpu sched_domain is being allocated,
+ * llc_max is unknown, and thus the actual size
+ * of the sched_domain(including the llc_counts elements)
+ * is undetermined.
+ */
+ unsigned int *llc_counts;
+#endif
+
#ifdef CONFIG_SCHEDSTATS
/* sched_balance_rq() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index edf6d7ec73ca..995a42cb4697 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -634,6 +634,11 @@ static void destroy_sched_domain(struct sched_domain *sd)
if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
kfree(sd->shared);
+
+#ifdef CONFIG_SCHED_CACHE
+ /* only the bottom sd has llc_counts array */
+ kfree(sd->llc_counts);
+#endif
kfree(sd);
}
@@ -753,10 +758,18 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
if (sd && sd_degenerate(sd)) {
tmp = sd;
sd = sd->parent;
- destroy_sched_domain(tmp);
+
if (sd) {
struct sched_group *sg = sd->groups;
+#ifdef CONFIG_SCHED_CACHE
+ /* move buffer to parent as child is being destroyed */
+ sd->llc_counts = tmp->llc_counts;
+ sd->llc_max = tmp->llc_max;
+ /* make sure destroy_sched_domain() does not free it */
+ tmp->llc_counts = NULL;
+ tmp->llc_max = 0;
+#endif
/*
* sched groups hold the flags of the child sched
* domain for convenience. Clear such flags since
@@ -768,6 +781,8 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
sd->child = NULL;
}
+
+ destroy_sched_domain(tmp);
}
sched_domain_debug(sd, cpu);
@@ -793,6 +808,48 @@ enum s_alloc {
sa_none,
};
+#ifdef CONFIG_SCHED_CACHE
+static bool alloc_sd_llc(const struct cpumask *cpu_map,
+ struct s_data *d)
+{
+ struct sched_domain *sd;
+ unsigned int *p;
+ int i;
+
+ for_each_cpu(i, cpu_map) {
+ sd = *per_cpu_ptr(d->sd, i);
+ if (!sd)
+ goto err;
+
+ p = kcalloc(max_lid + 1, sizeof(unsigned int), GFP_KERNEL);
+ if (!p)
+ goto err;
+
+ sd->llc_counts = p;
+ sd->llc_max = max_lid;
+ }
+
+ return true;
+err:
+ for_each_cpu(i, cpu_map) {
+ sd = *per_cpu_ptr(d->sd, i);
+ if (sd) {
+ sd->llc_max = 0;
+ kfree(sd->llc_counts);
+ sd->llc_counts = NULL;
+ }
+ }
+
+ return false;
+}
+#else
+static bool alloc_sd_llc(const struct cpumask *cpu_map,
+ struct s_data *d)
+{
+ return false;
+}
+#endif
+
/*
* Return the canonical balance CPU for this group, this is the first CPU
* of this group that's also in the balance mask.
@@ -2759,6 +2816,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
}
}
+ alloc_sd_llc(cpu_map, &d);
+
/* Attach the domains */
rcu_read_lock();
for_each_cpu(i, cpu_map) {
--
2.32.0