[Patch v4 17/22] sched/cache: Avoid cache-aware scheduling for memory-heavy processes
From: Tim Chen
Date: Wed Apr 01 2026 - 17:48:11 EST
From: Chen Yu <yu.c.chen@xxxxxxxxx>
Prateek and Tingyin reported that memory-intensive workloads (such as
stream) can saturate memory bandwidth and caches on the preferred LLC
when sched_cache aggregates too many threads.
To mitigate this, estimate a process's memory footprint by comparing
its RSS (anonymous and shmem pages) to the size of the LLC. If the
RSS exceeds the LLC size, skip cache-aware scheduling.
Note that RSS is only an approximation of the memory footprint.
By default, the comparison is strict, but a later patch will allow
users to provide a hint to adjust this threshold.
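
For illustration only, here is a minimal userspace sketch of the
check (hypothetical names; the real implementation is
exceed_llc_capacity() in the diff below). Assuming 4 KiB pages, a
32 MiB LLC corresponds to 8192 pages of anonymous plus shmem RSS:

#include <stdbool.h>
#include <stdint.h>

/* Assumption: 4 KiB pages; the kernel uses PAGE_SIZE instead. */
#define SKETCH_PAGE_SIZE 4096ULL

/* Strict comparison, matching the default policy described above. */
static bool sketch_exceeds_llc(uint64_t rss_pages, uint64_t llc_bytes)
{
	return llc_bytes < rss_pages * SKETCH_PAGE_SIZE;
}

/*
 * sketch_exceeds_llc(8192, 32ULL << 20) is false (RSS exactly fits);
 * sketch_exceeds_llc(8193, 32ULL << 20) is true (skip sched_cache).
 */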
According to testing from Adam, some systems do not have a shared L3
but do have shared L2 clusters. In this case, the L2 becomes the
LLC[1], as sketched below.
Link[1]: https://lore.kernel.org/all/3cb6ebc7-a2fd-42b3-8739-b00e28a09cb6@xxxxxxxxxxxxxxxxxxxxxx/
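
As a sketch of that lookup order (mirroring exceed_llc_capacity() in
the diff below; _get_cpu_cacheinfo_level() is the lockless helper this
patch introduces, and sketch_llc_leaf() is a hypothetical name):

static struct cacheinfo *sketch_llc_leaf(int cpu)
{
	/* Prefer the L3 leaf; fall back to a shared L2 when no L3 exists. */
	struct cacheinfo *ci = _get_cpu_cacheinfo_level(cpu, 3);

	return ci ? ci : _get_cpu_cacheinfo_level(cpu, 2);
}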
Suggested-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
Suggested-by: Vern Hao <vernhao@xxxxxxxxxxx>
Signed-off-by: Chen Yu <yu.c.chen@xxxxxxxxx>
Co-developed-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
Notes:
v3->v4:
No change.
include/linux/cacheinfo.h | 21 ++++++++++-------
kernel/sched/fair.c | 48 +++++++++++++++++++++++++++++++++++----
2 files changed, 56 insertions(+), 13 deletions(-)
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index c8f4f0a0b874..82d0d59ca0e1 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -113,18 +113,11 @@ int acpi_get_cache_info(unsigned int cpu,
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
-/*
- * Get the cacheinfo structure for the cache associated with @cpu at
- * level @level.
- * cpuhp lock must be held.
- */
-static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
+static inline struct cacheinfo *_get_cpu_cacheinfo_level(int cpu, int level)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
int i;
- lockdep_assert_cpus_held();
-
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == level) {
if (ci->info_list[i].attributes & CACHE_ID)
@@ -136,6 +129,18 @@ static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
return NULL;
}
+/*
+ * Get the cacheinfo structure for the cache associated with @cpu at
+ * level @level.
+ * cpuhp lock must be held.
+ */
+static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
+{
+ lockdep_assert_cpus_held();
+
+ return _get_cpu_cacheinfo_level(cpu, level);
+}
+
/*
* Get the id of the cache associated with @cpu at level @level.
* cpuhp lock must be held.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 077ae7875e2e..a2d1b8b2a188 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1316,6 +1316,37 @@ static inline bool valid_llc_buf(struct sched_domain *sd,
return true;
}
+static bool exceed_llc_capacity(struct mm_struct *mm, int cpu)
+{
+ struct cacheinfo *ci;
+ u64 rss, llc;
+
+ /*
+ * get_cpu_cacheinfo_level() cannot be used
+ * here because it requires cpu_hotplug_lock
+ * to be held. Use _get_cpu_cacheinfo_level()
+ * directly, because 'cpu' cannot be offlined
+ * at this point.
+ */
+ ci = _get_cpu_cacheinfo_level(cpu, 3);
+ if (!ci) {
+ /*
+ * On systems without an L3 but with a shared
+ * L2, the L2 becomes the LLC.
+ */
+ ci = _get_cpu_cacheinfo_level(cpu, 2);
+ if (!ci)
+ return true;
+ }
+
+ llc = ci->size;
+
+ rss = get_mm_counter(mm, MM_ANONPAGES) +
+ get_mm_counter(mm, MM_SHMEMPAGES);
+
+ return (llc < (rss * PAGE_SIZE));
+}
+
static bool exceed_llc_nr(struct mm_struct *mm, int cpu)
{
return !fits_capacity((mm->sc_stat.nr_running_avg * cpu_smt_num_threads),
@@ -1514,7 +1545,8 @@ void account_mm_sched(struct rq *rq, struct task_struct *p, s64 delta_exec)
if (time_after(epoch,
READ_ONCE(mm->sc_stat.epoch) + EPOCH_LLC_AFFINITY_TIMEOUT) ||
get_nr_threads(p) <= 1 ||
- exceed_llc_nr(mm, cpu_of(rq))) {
+ exceed_llc_nr(mm, cpu_of(rq)) ||
+ exceed_llc_capacity(mm, cpu_of(rq))) {
if (mm->sc_stat.cpu != -1)
mm->sc_stat.cpu = -1;
}
@@ -1619,8 +1651,8 @@ static inline void update_avg_scale(u64 *avg, u64 sample)
static void task_cache_work(struct callback_head *work)
{
+ int cpu, m_a_cpu = -1, nr_running = 0, curr_cpu;
struct task_struct *p = current, *cur;
- int cpu, m_a_cpu = -1, nr_running = 0;
unsigned long curr_m_a_occ = 0;
struct mm_struct *mm = p->mm;
unsigned long m_a_occ = 0;
@@ -1633,7 +1665,9 @@ static void task_cache_work(struct callback_head *work)
if (p->flags & PF_EXITING)
return;
- if (get_nr_threads(p) <= 1) {
+ curr_cpu = task_cpu(p);
+ if (get_nr_threads(p) <= 1 ||
+ exceed_llc_capacity(mm, curr_cpu)) {
if (mm->sc_stat.cpu != -1)
mm->sc_stat.cpu = -1;
@@ -10144,8 +10178,12 @@ static enum llc_mig can_migrate_llc_task(int src_cpu, int dst_cpu,
if (cpu < 0 || cpus_share_cache(src_cpu, dst_cpu))
return mig_unrestricted;
- /* skip cache aware load balance for single/too many threads */
- if (get_nr_threads(p) <= 1 || exceed_llc_nr(mm, dst_cpu)) {
+ /*
+ * Skip cache-aware load balance for single/too many
+ * threads or a large memory RSS.
+ */
+ if (get_nr_threads(p) <= 1 || exceed_llc_nr(mm, dst_cpu) ||
+ exceed_llc_capacity(mm, dst_cpu)) {
if (mm->sc_stat.cpu != -1)
mm->sc_stat.cpu = -1;
return mig_unrestricted;
--
2.32.0