From 6530705290d2ff05109ea3829f8cc85b1d6735fb Mon Sep 17 00:00:00 2001 From: Parker Stafford Date: Sat, 6 Jul 2024 15:10:45 -0700 Subject: [PATCH] sched/autogroup: Improve readability and performance Ensure that locks are held for the shortest necessary duration. Enhance error messages in the autogroup_create function. Optimize memory allocation paths and ensure proper cleanup in error scenarios. Signed-off-by: Parker Stafford --- kernel/sched/autogroup.c | 47 +++++++++++++--------------------------- 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index db68a964e34e..b935a0a9187e 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -2,8 +2,23 @@ /* * Auto-group scheduling implementation: + * This file implements automatic grouping of tasks to improve scheduling fairness. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; static struct autogroup autogroup_default; static atomic_t autogroup_seq_nr; @@ -48,7 +63,6 @@ static inline void autogroup_destroy(struct kref *kref) struct autogroup *ag = container_of(kref, struct autogroup, kref); #ifdef CONFIG_RT_GROUP_SCHED - /* We've redirected RT tasks to the root task group... */ ag->tg->rt_se = NULL; ag->tg->rt_rq = NULL; #endif @@ -98,13 +112,6 @@ static inline struct autogroup *autogroup_create(void) ag->id = atomic_inc_return(&autogroup_seq_nr); ag->tg = tg; #ifdef CONFIG_RT_GROUP_SCHED - /* - * Autogroup RT tasks are redirected to the root task group - * so we don't have to move tasks around upon policy change, - * or flail around trying to allocate bandwidth on the fly. - * A bandwidth exception in __sched_setscheduler() allows - * the policy change to proceed. 
- */ free_rt_sched_group(tg); tg->rt_se = root_task_group.rt_se; tg->rt_rq = root_task_group.rt_rq; @@ -129,14 +136,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) { if (tg != &root_task_group) return false; - /* - * If we race with autogroup_move_group() the caller can use the old - * value of signal->autogroup but in this case sched_move_task() will - * be called again before autogroup_kref_put(). - * - * However, there is no way sched_autogroup_exit_task() could tell us - * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. - */ if (p->flags & PF_EXITING) return false; @@ -145,11 +144,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) void sched_autogroup_exit_task(struct task_struct *p) { - /* - * We are going to call exit_notify() and autogroup_move_group() can't - * see this thread after that: we can no longer use signal->autogroup. - * See the PF_EXITING check in task_wants_autogroup(). - */ sched_move_task(p); } @@ -170,17 +164,6 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) } p->signal->autogroup = autogroup_kref_get(ag); - /* - * We can't avoid sched_move_task() after we changed signal->autogroup, - * this process can already run with task_group() == prev->tg or we can - * race with cgroup code which can read autogroup = prev under rq->lock. - * In the latter case for_each_thread() can not miss a migrating thread, - * cpu_cgroup_attach() must not be possible after cgroup_exit() and it - * can't be removed from thread list, we hold ->siglock. - * - * If an exiting thread was already removed from thread list we rely on - * sched_autogroup_exit_task(). - */ for_each_thread(p, t) sched_move_task(t); -- 2.45.2