[PATCH v3 17/21] sched/cache: Enable cache aware scheduling for multi LLCs NUMA node

From: Tim Chen

Date: Tue Feb 10 2026 - 17:18:11 EST


From: Chen Yu <yu.c.chen@xxxxxxxxx>

Introduce sched_cache_present to enable cache-aware scheduling on
NUMA nodes with multiple LLCs. Cache-aware load balancing should
only be enabled if there is more than one LLC within a NUMA node.
sched_cache_present is introduced to indicate whether the
platform supports this topology.

Suggested-by: Libo Chen <libchen@xxxxxxxxxxxxxxx>
Suggested-by: Adam Li <adamli@xxxxxxxxxxxxxxxxxxxxxx>
Co-developed-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
Signed-off-by: Chen Yu <yu.c.chen@xxxxxxxxx>
---

Notes:
v2->v3:
No change.

kernel/sched/sched.h | 3 ++-
kernel/sched/topology.c | 18 ++++++++++++++++--
2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c18e59f320a6..59ac04625842 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3916,11 +3916,12 @@ static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct
#endif /* !CONFIG_SCHED_MM_CID */

#ifdef CONFIG_SCHED_CACHE
+DECLARE_STATIC_KEY_FALSE(sched_cache_present);
extern int max_llcs;

static inline bool sched_cache_enabled(void)
{
- return false;
+ return static_branch_unlikely(&sched_cache_present);
}
#endif
extern void init_sched_mm(struct task_struct *p);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index dae78b5915a7..9104fed25351 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -801,6 +801,7 @@ enum s_alloc {
};

#ifdef CONFIG_SCHED_CACHE
+DEFINE_STATIC_KEY_FALSE(sched_cache_present);
static bool alloc_sd_pref(const struct cpumask *cpu_map,
struct s_data *d)
{
@@ -2604,6 +2605,7 @@ static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
enum s_alloc alloc_state = sa_none;
+ bool has_multi_llcs = false;
struct sched_domain *sd;
struct s_data d;
struct rq *rq = NULL;
@@ -2731,10 +2733,12 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
* between LLCs and memory channels.
*/
nr_llcs = sd->span_weight / child->span_weight;
- if (nr_llcs == 1)
+ if (nr_llcs == 1) {
imb = sd->span_weight >> 3;
- else
+ } else {
imb = nr_llcs;
+ has_multi_llcs = true;
+ }
imb = max(1U, imb);
sd->imb_numa_nr = imb;

@@ -2796,6 +2800,16 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att

ret = 0;
error:
+#ifdef CONFIG_SCHED_CACHE
+ /*
+ * TBD: check before writing to it. sched domain rebuild
+ * is not in the critical path, leave as-is for now.
+ */
+ if (!ret && has_multi_llcs)
+ static_branch_enable_cpuslocked(&sched_cache_present);
+ else
+ static_branch_disable_cpuslocked(&sched_cache_present);
+#endif
__free_domain_allocs(&d, alloc_state, cpu_map);

return ret;
--
2.32.0