[RFC][PATCH 3/8] sched/fair: Add cgroup_mode: UP
From: Peter Zijlstra
Date: Tue Mar 17 2026 - 06:53:00 EST
Instead of calculating the proportional fraction of tg->shares for
each CPU, just give each CPU the full measure, ignoring these pesky
SMP problems.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/sched/debug.c | 3 ++-
kernel/sched/fair.c | 19 ++++++++++++++++++-
2 files changed, 20 insertions(+), 2 deletions(-)
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -588,9 +588,10 @@ static void debugfs_fair_server_init(voi
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-int cgroup_mode = 0;
+int cgroup_mode = 1;
static const char *cgroup_mode_str[] = {
+ "up",
"smp",
};
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4152,7 +4152,7 @@ static inline int throttled_hierarchy(st
*
* hence icky!
*/
-static long calc_group_shares(struct cfs_rq *cfs_rq)
+static long calc_smp_shares(struct cfs_rq *cfs_rq)
{
long tg_weight, tg_shares, load, shares;
struct task_group *tg = cfs_rq->tg;
@@ -4187,6 +4187,23 @@ static long calc_group_shares(struct cfs
}
/*
+ * Ignore the SMP proportional distribution entirely and use (4) directly: every CPU gets the group's full tg->shares.
+ */
+static long calc_up_shares(struct cfs_rq *cfs_rq)
+{
+ struct task_group *tg = cfs_rq->tg;
+ return READ_ONCE(tg->shares);
+}
+
+static long calc_group_shares(struct cfs_rq *cfs_rq)
+{
+ if (cgroup_mode == 0)
+ return calc_up_shares(cfs_rq);
+
+ return calc_smp_shares(cfs_rq);
+}
+
+/*
* Recomputes the group entity based on the current state of its group
* runqueue.
*/