[PATCH 2/3 v2] sched: Move sched fair prio comparison to fair.c

From: Tim Chen
Date: Wed Jul 24 2019 - 16:58:18 EST


Consolidate the fair class's task priority comparison into fair.c.
This is a simple code reorganization with no functional changes.
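
For context, the comparison being moved re-bases one task's vruntime
onto the other task's runqueue before comparing, since vruntime values
are only meaningful relative to their own runqueue's min_vruntime. A
minimal standalone sketch of that normalization follows; the toy_task,
toy_cfs_rq, and toy_prio_less_fair names are simplified stand-ins for
illustration, not the kernel's structures:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	struct toy_cfs_rq { uint64_t min_vruntime; };
	struct toy_task {
		int cpu;
		uint64_t vruntime;
		struct toy_cfs_rq *cfs_rq;
	};

	/* Returns true if a has consumed more virtual runtime than b,
	 * i.e. b should be preferred. */
	static bool toy_prio_less_fair(const struct toy_task *a,
				       const struct toy_task *b)
	{
		uint64_t a_vruntime = a->vruntime;
		uint64_t b_vruntime = b->vruntime;

		/* Re-base b's vruntime onto a's runqueue so the two
		 * values share a common reference point. */
		if (a->cpu != b->cpu) {
			b_vruntime -= b->cfs_rq->min_vruntime;
			b_vruntime += a->cfs_rq->min_vruntime;
		}

		/* Compare via signed difference, as the patch does,
		 * so the result stays correct across u64 wraparound. */
		return (int64_t)(a_vruntime - b_vruntime) > 0;
	}

	int main(void)
	{
		struct toy_cfs_rq rq0 = { .min_vruntime = 1000 };
		struct toy_cfs_rq rq1 = { .min_vruntime = 5000 };
		struct toy_task a = { 0, 1200, &rq0 };
		struct toy_task b = { 1, 5100, &rq1 };

		/* Raw vruntimes suggest b (5100) is far behind a (1200),
		 * but after normalization b has run only 100 units past
		 * its queue's minimum versus a's 200. */
		printf("a less fair than b: %s\n",
		       toy_prio_less_fair(&a, &b) ? "yes" : "no");
		return 0;
	}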

Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
kernel/sched/core.c | 21 ++-------------------
kernel/sched/fair.c | 21 +++++++++++++++++++++
kernel/sched/sched.h | 1 +
3 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 50453e1329f3..0f893853766c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -105,25 +105,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b)
if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
return !dl_time_before(a->dl.deadline, b->dl.deadline);

- if (pa == MAX_RT_PRIO + MAX_NICE) { /* fair */
- u64 a_vruntime = a->se.vruntime;
- u64 b_vruntime = b->se.vruntime;
-
- /*
- * Normalize the vruntime if tasks are in different cpus.
- */
- if (task_cpu(a) != task_cpu(b)) {
- b_vruntime -= task_cfs_rq(b)->min_vruntime;
- b_vruntime += task_cfs_rq(a)->min_vruntime;
-
- trace_printk("(%d:%Lu,%Lu,%Lu) <> (%d:%Lu,%Lu,%Lu)\n",
- a->pid, a_vruntime, a->se.vruntime, task_cfs_rq(a)->min_vruntime,
- b->pid, b_vruntime, b->se.vruntime, task_cfs_rq(b)->min_vruntime);
-
- }
-
- return !((s64)(a_vruntime - b_vruntime) <= 0);
- }
+ if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */
+ return prio_less_fair(a, b);

return false;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 02bff10237d4..e289b6e1545b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -602,6 +602,27 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
return delta;
}

+bool prio_less_fair(struct task_struct *a, struct task_struct *b)
+{
+ u64 a_vruntime = a->se.vruntime;
+ u64 b_vruntime = b->se.vruntime;
+
+ /*
+ * Normalize the vruntime if tasks are in different cpus.
+ */
+ if (task_cpu(a) != task_cpu(b)) {
+ b_vruntime -= task_cfs_rq(b)->min_vruntime;
+ b_vruntime += task_cfs_rq(a)->min_vruntime;
+
+ trace_printk("(%d:%Lu,%Lu,%Lu) <> (%d:%Lu,%Lu,%Lu)\n",
+ a->pid, a_vruntime, a->se.vruntime, task_cfs_rq(a)->min_vruntime,
+ b->pid, b_vruntime, b->se.vruntime, task_cfs_rq(b)->min_vruntime);
+
+ }
+
+ return !((s64)(a_vruntime - b_vruntime) <= 0);
+}
+
/*
* The idea is to set a period in which each task runs once.
*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e91c188a452c..bdabe7ce1152 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1015,6 +1015,7 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
}

extern void queue_core_balance(struct rq *rq);
+extern bool prio_less_fair(struct task_struct *a, struct task_struct *b);

#else /* !CONFIG_SCHED_CORE */

--
2.20.1
