[tip: sched/core] sched: Simplify set_user_nice()

From: tip-bot2 for Peter Zijlstra
Date: Tue Sep 12 2023 - 06:34:34 EST


The following commit has been merged into the sched/core branch of tip:

Commit-ID: 7db0c4c81425e6e0d69dcb56e8c5e99d63f4ef07
Gitweb: https://git.kernel.org/tip/7db0c4c81425e6e0d69dcb56e8c5e99d63f4ef07
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Fri, 09 Jun 2023 20:52:55 +02:00
Committer: root <root@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx>
CommitterDate: Sat, 09 Sep 2023 15:10:14 +02:00

sched: Simplify set_user_nice()

Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
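[Editor's note, not part of the commit: the "guards" the description refers to
release the lock automatically when the guard variable leaves scope, which is
why the out_unlock: label and its goto can disappear. A minimal userspace
sketch of the same idea, using the GCC/Clang cleanup attribute and a pthread
mutex rather than the kernel's cleanup.h machinery (all names below are
illustrative only):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
	static int demo_value;

	/* Cleanup callback: runs when the guarded pointer goes out of scope. */
	static void mutex_unlock_cleanup(pthread_mutex_t **m)
	{
		if (*m)
			pthread_mutex_unlock(*m);
	}

	/* Take the lock now; arrange for the unlock to run at end of scope. */
	#define MUTEX_GUARD(m)						\
		pthread_mutex_t *_guard					\
			__attribute__((cleanup(mutex_unlock_cleanup))) = \
			(pthread_mutex_lock(m), (m))

	static void set_demo_value(int v)
	{
		MUTEX_GUARD(&demo_lock);

		if (v < 0)
			return;		/* early return: unlock still runs */

		demo_value = v;
	}				/* normal return: unlock runs here too */

	int main(void)
	{
		set_demo_value(-1);
		set_demo_value(42);
		printf("%d\n", demo_value);
		return 0;
	}

The control flow in set_demo_value() mirrors what set_user_nice() gains from
CLASS(task_rq_lock, ...): every return path drops the lock without a shared
unlock label.]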
kernel/sched/core.c | 13 ++++++-------
kernel/sched/sched.h | 5 +++++
2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index efe3848..e4d8b7a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7188,9 +7188,8 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
void set_user_nice(struct task_struct *p, long nice)
{
bool queued, running;
- int old_prio;
- struct rq_flags rf;
struct rq *rq;
+ int old_prio;

if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
return;
@@ -7198,7 +7197,9 @@ void set_user_nice(struct task_struct *p, long nice)
* We have to be careful, if called from sys_setpriority(),
* the task might be in the middle of scheduling on another CPU.
*/
- rq = task_rq_lock(p, &rf);
+ CLASS(task_rq_lock, rq_guard)(p);
+ rq = rq_guard.rq;
+
update_rq_clock(rq);

/*
@@ -7209,8 +7210,9 @@ void set_user_nice(struct task_struct *p, long nice)
*/
if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
p->static_prio = NICE_TO_PRIO(nice);
- goto out_unlock;
+ return;
}
+
queued = task_on_rq_queued(p);
running = task_current(rq, p);
if (queued)
@@ -7233,9 +7235,6 @@ void set_user_nice(struct task_struct *p, long nice)
* lowered its priority, then reschedule its CPU:
*/
p->sched_class->prio_changed(rq, p, old_prio);
-
-out_unlock:
- task_rq_unlock(rq, p, &rf);
}
EXPORT_SYMBOL(set_user_nice);

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3a01b7a..62013c4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1657,6 +1657,11 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}

+DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
+ _T->rq = task_rq_lock(_T->lock, &_T->rf),
+ task_rq_unlock(_T->rq, _T->lock, &_T->rf),
+ struct rq *rq; struct rq_flags rf)
+
static inline void
rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
__acquires(rq->lock)
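[Editor's note on the sched.h hunk: DEFINE_LOCK_GUARD_1() produces a guard
whose constructor acquires the lock and whose destructor releases it, with
room for extra per-guard state (here the rq pointer and rq_flags) that the
caller can read back, which is exactly how set_user_nice() obtains rq via
rq_guard.rq. A rough userspace analogue of that shape, under the same
cleanup-attribute assumption as the sketch above and with hypothetical names,
not the kernel's actual macro:

	#include <pthread.h>
	#include <stdio.h>

	struct runqueue {
		pthread_mutex_t lock;
		int nr_running;
	};

	/* The guard bundles the extra state handed back to the caller. */
	struct rq_guard {
		struct runqueue *rq;
	};

	static struct rq_guard rq_guard_ctor(struct runqueue *rq)
	{
		pthread_mutex_lock(&rq->lock);
		return (struct rq_guard){ .rq = rq };
	}

	static void rq_guard_dtor(struct rq_guard *g)
	{
		if (g->rq)
			pthread_mutex_unlock(&g->rq->lock);
	}

	#define RQ_GUARD(name, rq)					\
		struct rq_guard name					\
			__attribute__((cleanup(rq_guard_dtor))) =	\
			rq_guard_ctor(rq)

	static struct runqueue demo_rq = { .lock = PTHREAD_MUTEX_INITIALIZER };

	static void bump_nr_running(void)
	{
		RQ_GUARD(guard, &demo_rq);	/* lock taken here */

		guard.rq->nr_running++;		/* state carried by the guard */
	}					/* unlock on every return path */

	int main(void)
	{
		bump_nr_running();
		printf("%d\n", demo_rq.nr_running);
		return 0;
	}
]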