Re: [PATCH 17/24] sched/fair: Implement delayed dequeue

From: Mike Galbraith
Date: Wed Nov 06 2024 - 09:15:50 EST


On Wed, 2024-11-06 at 14:53 +0100, Peter Zijlstra wrote:
>
> So... I was trying to make that prettier and ended up with something
> like this:

Passing ENQUEUE_DELAYED to dequeue_task() looks funky until you check
the value, but otherwise yeah, when applied, that looks better to me.
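
The reason that works, for anyone puzzled by it: the DEQUEUE_* and
ENQUEUE_* bits that travel through both paths are kept value-identical
in kernel/sched/sched.h, so the delayed bit means the same thing on
either side of the call. Roughly (an abridged excerpt from memory of
this series, double-check your tree):

/* kernel/sched/sched.h (abridged) */
#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_NOCLOCK		0x08	/* Matches ENQUEUE_NOCLOCK */
#define DEQUEUE_DELAYED		0x200	/* Matches ENQUEUE_DELAYED */

#define ENQUEUE_NOCLOCK		0x08
#define ENQUEUE_DELAYED		0x200

so the DEQUEUE_SLEEP | ENQUEUE_DELAYED | ENQUEUE_NOCLOCK that
dequeue_task() ends up seeing is the same bit pattern as
DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK.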

>
> ---
>  kernel/sched/core.c  | 46 ++++++++++++++++++++++++++++------------------
>  kernel/sched/sched.h |  5 +++++
>  2 files changed, 33 insertions(+), 18 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 54d82c21fc8e..b083c6385e88 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3774,28 +3774,38 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
>   */
>  static int ttwu_runnable(struct task_struct *p, int wake_flags)
>  {
> -       struct rq_flags rf;
> -       struct rq *rq;
> -       int ret = 0;
> +       CLASS(__task_rq_lock, rq_guard)(p);
> +       struct rq *rq = rq_guard.rq;
>  
> -       rq = __task_rq_lock(p, &rf);
> -       if (task_on_rq_queued(p)) {
> -               update_rq_clock(rq);
> -               if (p->se.sched_delayed)
> -                       enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
> -               if (!task_on_cpu(rq, p)) {
> -                       /*
> -                        * When on_rq && !on_cpu the task is preempted, see if
> -                        * it should preempt the task that is current now.
> -                        */
> -                       wakeup_preempt(rq, p, wake_flags);
> +       if (!task_on_rq_queued(p))
> +               return 0;
> +
> +       update_rq_clock(rq);
> +       if (p->se.sched_delayed) {
> +               int queue_flags = ENQUEUE_DELAYED | ENQUEUE_NOCLOCK;
> +
> +               /*
> +                * Since sched_delayed means we cannot be current anywhere,
> +                * dequeue it here and have it fall through to the
> +                * select_task_rq() case further along the ttwu() path.
> +                */
> +               if (rq->nr_running > 1 && p->nr_cpus_allowed > 1) {
> +                       dequeue_task(rq, p, DEQUEUE_SLEEP | queue_flags);
> +                       return 0;
>                 }
> -               ttwu_do_wakeup(p);
> -               ret = 1;
> +
> +               enqueue_task(rq, p, queue_flags);
>         }
> -       __task_rq_unlock(rq, &rf);
> +       if (!task_on_cpu(rq, p)) {
> +               /*
> +                * When on_rq && !on_cpu the task is preempted, see if
> +                * it should preempt the task that is current now.
> +                */
> +               wakeup_preempt(rq, p, wake_flags);
> +       }
> +       ttwu_do_wakeup(p);
>  
> -       return ret;
> +       return 1;
>  }
>  
>  #ifdef CONFIG_SMP
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 21b1780c6695..1714ac38500f 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1787,6 +1787,11 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
>         raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
>  }
>  
> +DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
> +                   _T->rq = __task_rq_lock(_T->lock, &_T->rf),
> +                   __task_rq_unlock(_T->rq, &_T->rf),
> +                   struct rq *rq; struct rq_flags rf)
> +
>  DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
>                     _T->rq = task_rq_lock(_T->lock, &_T->rf),
>                     task_rq_unlock(_T->rq, _T->lock, &_T->rf),
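
For completeness, since the guard is what lets the early returns stay
tidy: DEFINE_LOCK_GUARD_1() (include/linux/cleanup.h) generates a small
struct plus a constructor/destructor pair, and CLASS() declares an
instance with __attribute__((cleanup(...))), so __task_rq_unlock() runs
on every exit from ttwu_runnable() without an explicit unlock. A
hand-expanded sketch of what the new __task_rq_lock guard amounts to
(not the exact macro output, just the shape of it):

/* Rough expansion of DEFINE_LOCK_GUARD_1(__task_rq_lock, ...) */
typedef struct {
	struct task_struct *lock;	/* the guarded object, aka _T->lock */
	struct rq *rq;			/* extra member: _T->rq */
	struct rq_flags rf;		/* extra member: _T->rf */
} class___task_rq_lock_t;

static inline void class___task_rq_lock_destructor(class___task_rq_lock_t *_T)
{
	if (_T->lock)					/* runs at scope exit */
		__task_rq_unlock(_T->rq, &_T->rf);
}

static inline class___task_rq_lock_t class___task_rq_lock_constructor(struct task_struct *p)
{
	class___task_rq_lock_t _T = { .lock = p };

	_T.rq = __task_rq_lock(_T.lock, &_T.rf);	/* takes rq->lock */
	return _T;
}

/*
 * CLASS(__task_rq_lock, rq_guard)(p) then becomes, approximately:
 *
 *	class___task_rq_lock_t rq_guard
 *		__attribute__((cleanup(class___task_rq_lock_destructor))) =
 *		class___task_rq_lock_constructor(p);
 */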