[PATCH 1/1] sched: simplify find_lock_later_rq and find_lock_lowest_rq code

From: Zhen Lei
Date: Tue Nov 28 2017 - 00:58:03 EST


In each function, there is exactly one place that needs to return the found
rq; all other paths return NULL. So there is no need to explicitly set the
local variable later_rq/lowest_rq to NULL.

Signed-off-by: Zhen Lei <thunder.leizhen@xxxxxxxxxx>
---
kernel/sched/deadline.c | 12 ++++--------
kernel/sched/rt.c | 12 ++++--------
2 files changed, 8 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2473736..a2ace81 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1876,7 +1876,7 @@ static int find_later_rq(struct task_struct *task)
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
- struct rq *later_rq = NULL;
+ struct rq *later_rq;
int tries;
int cpu;

@@ -1890,15 +1890,13 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)

if (later_rq->dl.dl_nr_running &&
!dl_time_before(task->dl.deadline,
- later_rq->dl.earliest_dl.curr)) {
+ later_rq->dl.earliest_dl.curr))
/*
* Target rq has tasks of equal or earlier deadline,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
- later_rq = NULL;
break;
- }

/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
@@ -1908,7 +1906,6 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
!dl_task(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, later_rq);
- later_rq = NULL;
break;
}
}
@@ -1921,14 +1918,13 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
if (!later_rq->dl.dl_nr_running ||
dl_time_before(task->dl.deadline,
later_rq->dl.earliest_dl.curr))
- break;
+ return later_rq;

/* Otherwise we try again. */
double_unlock_balance(rq, later_rq);
- later_rq = NULL;
}

- return later_rq;
+ return NULL;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 4056c19..cf78bbb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1696,7 +1696,7 @@ static int find_lowest_rq(struct task_struct *task)
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
- struct rq *lowest_rq = NULL;
+ struct rq *lowest_rq;
int tries;
int cpu;

@@ -1708,15 +1708,13 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)

lowest_rq = cpu_rq(cpu);

- if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+ if (lowest_rq->rt.highest_prio.curr <= task->prio)
/*
* Target rq has tasks of equal or higher priority,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
- lowest_rq = NULL;
break;
- }

/* if the prio of this runqueue changed, try again */
if (double_lock_balance(rq, lowest_rq)) {
@@ -1733,21 +1731,19 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
!task_on_rq_queued(task))) {

double_unlock_balance(rq, lowest_rq);
- lowest_rq = NULL;
break;
}
}

/* If this rq is still suitable use it. */
if (lowest_rq->rt.highest_prio.curr > task->prio)
- break;
+ return lowest_rq;

/* try again */
double_unlock_balance(rq, lowest_rq);
- lowest_rq = NULL;
}

- return lowest_rq;
+ return NULL;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
--
1.8.3