Re: [PATCH v2] sched: fair: Prevent negative lag increase during delayed dequeue
From: Vincent Guittot
Date: Thu Apr 02 2026 - 09:29:32 EST
On Thu, 2 Apr 2026 at 15:14, Peter Zijlstra <peterz@xxxxxxxxxxxxx> wrote:
>
> On Tue, Mar 31, 2026 at 06:23:52PM +0200, Vincent Guittot wrote:
> > +/*
> > + * Delayed dequeue aims to reduce the negative lag of a dequeued task.
> > + * While updating the lag of an entity, check that negative lag didn't increase
> > + * during the delayed dequeue period which would be unfair.
> > + * Similarly, check that the entity didn't gain positive lag when DELAY_ZERO is
> > + * set.
> > + *
> > + * Return true if the lag has been adjusted.
> > + */
> > +static bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
> > {
> > + s64 vlag;
> > +
> > WARN_ON_ONCE(!se->on_rq);
> >
> > - se->vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq));
> > + vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq));
> > +
> > + if (se->sched_delayed)
> > + /* previous vlag < 0 otherwise se would not be delayed */
> > + se->vlag = clamp(vlag, se->vlag, sched_feat(DELAY_ZERO) ? 0 : S64_MAX);
> > + else
> > + se->vlag = vlag;
> > +
> > + return (vlag != se->vlag);
> > }
>
> Would you mind terribly if I write this like so?
np, that looks good to me too
>
> ---
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -841,29 +841,32 @@ static s64 entity_lag(struct cfs_rq *cfs
> }
>
> /*
> - * Delayed dequeue aims to reduce the negative lag of a dequeued task.
> - * While updating the lag of an entity, check that negative lag didn't increase
> + * Delayed dequeue aims to reduce the negative lag of a dequeued task. While
> + * updating the lag of an entity, check that negative lag didn't increase
> * during the delayed dequeue period which would be unfair.
> - * Similarly, check that the entity didn't gain positive lag when DELAY_ZERO is
> - * set.
> + * Similarly, check that the entity didn't gain positive lag when DELAY_ZERO
> + * is set.
> *
> * Return true if the lag has been adjusted.
> */
> -static bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +static __always_inline
> +bool update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> - s64 vlag;
> + s64 vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq));
> + bool ret;
>
> WARN_ON_ONCE(!se->on_rq);
>
> - vlag = entity_lag(cfs_rq, se, avg_vruntime(cfs_rq));
> -
> - if (se->sched_delayed)
> + if (se->sched_delayed) {
> /* previous vlag < 0 otherwise se would not be delayed */
> - se->vlag = clamp(vlag, se->vlag, sched_feat(DELAY_ZERO) ? 0 : S64_MAX);
> - else
> - se->vlag = vlag;
> + vlag = max(vlag, se->vlag);
> + if (sched_feat(DELAY_ZERO))
> + vlag = min(vlag, 0);
> + }
> + ret = (vlag == se->vlag);
> + se->vlag = vlag;
>
> - return (vlag != se->vlag);
> + return ret;
> }
>
> /*
>