[RFC PATCH 3/9] sched: Always initialize rt_rq's task_group
From: Michal Koutný
Date: Mon Dec 16 2024 - 15:14:09 EST
rt_rq->tg may be NULL, which denotes the root task_group.

Store the pointer to root_task_group directly, so that callers can use
rt_rq->tg homogeneously.

root_task_group always exists with CONFIG_CGROUP_SCHED, and
CONFIG_RT_GROUP_SCHED depends on it.

This changes the root-level rt_rq's default limit from infinity to the
value of the (formerly global) RT throttling limit.
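
To illustrate the behavioral change, here is a minimal userspace
sketch; it is illustrative only, not kernel code. The _old/_new
function names are made up for the comparison, and the numbers assume
the default sysctls kernel.sched_rt_period_us=1000000 and
kernel.sched_rt_runtime_us=950000:

	#include <stdio.h>
	#include <stdint.h>

	#define RUNTIME_INF	(~0ULL)
	#define NSEC_PER_USEC	1000ULL

	struct task_group { int dummy; };
	static struct task_group root_task_group;

	struct rt_rq {
		struct task_group *tg;
		uint64_t rt_runtime;	/* runtime budget, in ns */
	};

	/* Old behavior: a NULL tg denoted the root and meant "no limit". */
	static uint64_t sched_rt_runtime_old(struct rt_rq *rt_rq)
	{
		if (!rt_rq->tg)
			return RUNTIME_INF;
		return rt_rq->rt_runtime;
	}

	/* New behavior: tg is always valid, the stored runtime applies. */
	static uint64_t sched_rt_runtime_new(struct rt_rq *rt_rq)
	{
		return rt_rq->rt_runtime;
	}

	int main(void)
	{
		struct rt_rq old_root = { .tg = NULL };
		struct rt_rq new_root = {
			.tg = &root_task_group,
			.rt_runtime = 950000 * NSEC_PER_USEC,
		};

		/* Prints RUNTIME_INF (18446744073709551615). */
		printf("old root limit: %llu\n",
		       (unsigned long long)sched_rt_runtime_old(&old_root));
		/* Prints 950000000, i.e. the 95%/1s throttling budget. */
		printf("new root limit: %llu\n",
		       (unsigned long long)sched_rt_runtime_new(&new_root));
		return 0;
	}

In other words, the root rt_rq now carries the same kind of bandwidth
limit as any other task_group's rt_rq instead of being special-cased
via a NULL tg.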
Signed-off-by: Michal Koutný <mkoutny@xxxxxxxx>
---
 kernel/sched/rt.c    | 7 ++-----
 kernel/sched/sched.h | 2 ++
 2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1940301c40f7d..41fed8865cb09 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -89,6 +89,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+	rt_rq->tg = &root_task_group;
 #endif
 }
@@ -484,9 +485,6 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-	if (!rt_rq->tg)
-		return RUNTIME_INF;
-
 	return rt_rq->rt_runtime;
 }
@@ -1156,8 +1154,7 @@ inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted++;
 
-	if (rt_rq->tg)
-		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+	start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
 }
 
 static void
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 76f5f53a645fc..38325bd32a0e0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -820,6 +820,8 @@ struct rt_rq {
 	unsigned int rt_nr_boosted;
 
 	struct rq *rq;
+#endif
+#ifdef CONFIG_CGROUP_SCHED
 	struct task_group *tg;
 #endif
 };
--
2.47.1