Re: [PATCH -tip 02/32] sched: Introduce sched_class::pick_task()

From: Singh, Balbir
Date: Thu Nov 19 2020 - 18:56:34 EST


On 18/11/20 10:19 am, Joel Fernandes (Google) wrote:
> From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
>
> Because sched_class::pick_next_task() also implies
> sched_class::set_next_task() (and possibly put_prev_task() and
> newidle_balance()), it is not state invariant. This makes it
> unsuitable for remote task selection.
>
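Since the sched.h hunk isn't quoted above, for context: my reading of the
new hook in struct sched_class. The prototype is inferred from the
.pick_task assignments further down, so take this as a sketch of the shape
rather than the exact hunk:

#ifdef CONFIG_SMP
	/*
	 * Selection only: unlike pick_next_task(), this must not touch
	 * rq state (no set_next_task(), put_prev_task() or
	 * newidle_balance()), which is what makes it safe to call for a
	 * remote rq.
	 */
	struct task_struct *(*pick_task)(struct rq *rq);
#endif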

The change makes sense; one small suggestion below.

> Tested-by: Julien Desfossez <jdesfossez@xxxxxxxxxxxxxxxx>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
> Signed-off-by: Vineeth Remanan Pillai <viremana@xxxxxxxxxxxxxxxxxxx>
> Signed-off-by: Julien Desfossez <jdesfossez@xxxxxxxxxxxxxxxx>
> Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
> ---
>  kernel/sched/deadline.c  | 16 ++++++++++++++--
>  kernel/sched/fair.c      | 32 +++++++++++++++++++++++++++++++-
>  kernel/sched/idle.c      |  8 ++++++++
>  kernel/sched/rt.c        | 15 +++++++++++++--
>  kernel/sched/sched.h     |  3 +++
>  kernel/sched/stop_task.c | 14 ++++++++++++--
>  6 files changed, 81 insertions(+), 7 deletions(-)
>
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index 0f2ea0a3664c..abfc8b505d0d 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -1867,7 +1867,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
>  	return rb_entry(left, struct sched_dl_entity, rb_node);
>  }
>
> -static struct task_struct *pick_next_task_dl(struct rq *rq)
> +static struct task_struct *pick_task_dl(struct rq *rq)
>  {
>  	struct sched_dl_entity *dl_se;
>  	struct dl_rq *dl_rq = &rq->dl;
> @@ -1879,7 +1879,18 @@ static struct task_struct *pick_next_task_dl(struct rq *rq)
>  	dl_se = pick_next_dl_entity(rq, dl_rq);
>  	BUG_ON(!dl_se);
>  	p = dl_task_of(dl_se);
> -	set_next_task_dl(rq, p, true);
> +
> +	return p;
> +}
> +
> +static struct task_struct *pick_next_task_dl(struct rq *rq)
> +{
> +	struct task_struct *p;
> +
> +	p = pick_task_dl(rq);
> +	if (p)
> +		set_next_task_dl(rq, p, true);
> +
>  	return p;
>  }
>
> @@ -2551,6 +2562,7 @@ DEFINE_SCHED_CLASS(dl) = {
>
>  #ifdef CONFIG_SMP
>  	.balance		= balance_dl,
> +	.pick_task		= pick_task_dl,
>  	.select_task_rq		= select_task_rq_dl,
>  	.migrate_task_rq	= migrate_task_rq_dl,
>  	.set_cpus_allowed	= set_cpus_allowed_dl,
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 52ddfec7cea6..12cf068eeec8 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4459,7 +4459,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
>  	 * Avoid running the skip buddy, if running something else can
>  	 * be done without getting too unfair.
>  	 */
> -	if (cfs_rq->skip == se) {
> +	if (cfs_rq->skip && cfs_rq->skip == se) {
>  		struct sched_entity *second;
>
>  		if (se == curr) {
> @@ -7017,6 +7017,35 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
>  		set_last_buddy(se);
>  }
>
> +#ifdef CONFIG_SMP
> +static struct task_struct *pick_task_fair(struct rq *rq)
> +{
> +	struct cfs_rq *cfs_rq = &rq->cfs;
> +	struct sched_entity *se;
> +
> +	if (!cfs_rq->nr_running)
> +		return NULL;
> +
> +	do {
> +		struct sched_entity *curr = cfs_rq->curr;
> +
> +		se = pick_next_entity(cfs_rq, NULL);
> +
> +		if (curr) {
> +			if (se && curr->on_rq)
> +				update_curr(cfs_rq);
> +
> +			if (!se || entity_before(curr, se))
> +				se = curr;
> +		}

Do we want to optimize this a bit? Something like:

if (curr) {
	if (!se || entity_before(curr, se))
		se = curr;

	if ((se != curr) && curr->on_rq)
		update_curr(cfs_rq);
}

Balbir