Re: [PATCH v2 4/7] sched/fair: Fix lag clamp

From: Vincent Guittot

Date: Mon Feb 23 2026 - 05:59:01 EST


On Thu, 19 Feb 2026 at 09:10, Peter Zijlstra <peterz@xxxxxxxxxxxxx> wrote:
>
> Vincent reported that he was seeing undue lag clamping in a mixed
> slice workload. Implement the max_slice tracking as per the todo
> comment.
>
> Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
> Reported-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
> Tested-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
> Tested-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
> Tested-by: Shubhang Kaushik <shubhang@xxxxxxxxxxxxxxxxxxxxxx>
> Link: https://patch.msgid.link/20250422101628.GA33555@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Reviewed-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>

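As an aside, a rough user-space sketch of how the clamp window changes for a
short-slice entity sharing a cfs_rq with a long-slice one. The slice values,
the HZ=1000 tick, and the omission of the calc_delta_fair() weight scaling
(which the real code applies on top of this) are all illustrative assumptions,
not taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC	1000000ULL		/* assume HZ=1000 for the example */
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	uint64_t se_slice   =    700000ULL;	/* 0.7 ms custom slice (made up) */
	uint64_t peer_slice = 100000000ULL;	/* 100 ms slice elsewhere on the rq (made up) */

	/* old bound: derived from this entity's own slice only */
	uint64_t limit_old = MAX(2 * se_slice, TICK_NSEC);

	/* new bound: derived from the largest slice queued on the cfs_rq */
	uint64_t limit_new = MAX(se_slice, peer_slice) + TICK_NSEC;

	printf("old lag limit: %llu ns\n", (unsigned long long)limit_old);
	printf("new lag limit: %llu ns\n", (unsigned long long)limit_new);
	return 0;
}

With the old bound the short-slice entity's lag was clamped well below what
the long-slice peer can legitimately induce; the new bound tracks the queue's
maximum request size instead.
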

> ---
> include/linux/sched.h | 1 +
> kernel/sched/fair.c | 39 +++++++++++++++++++++++++++++++++++----
> 2 files changed, 36 insertions(+), 4 deletions(-)
>
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -574,6 +574,7 @@ struct sched_entity {
> u64 deadline;
> u64 min_vruntime;
> u64 min_slice;
> + u64 max_slice;
>
> struct list_head group_node;
> unsigned char on_rq;
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -748,6 +748,8 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
> return cfs_rq->zero_vruntime;
> }
>
> +static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq);
> +
> /*
> * lag_i = S - s_i = w_i * (V - v_i)
> *
> @@ -761,17 +763,16 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
> * EEVDF gives the following limit for a steady state system:
> *
> * -r_max < lag < max(r_max, q)
> - *
> - * XXX could add max_slice to the augmented data to track this.
> */
> static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> + u64 max_slice = cfs_rq_max_slice(cfs_rq) + TICK_NSEC;
> s64 vlag, limit;
>
> WARN_ON_ONCE(!se->on_rq);
>
> vlag = avg_vruntime(cfs_rq) - se->vruntime;
> - limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
> + limit = calc_delta_fair(max_slice, se);
>
> se->vlag = clamp(vlag, -limit, limit);
> }
> @@ -829,6 +830,21 @@ static inline u64 cfs_rq_min_slice(struc
> return min_slice;
> }
>
> +static inline u64 cfs_rq_max_slice(struct cfs_rq *cfs_rq)
> +{
> + struct sched_entity *root = __pick_root_entity(cfs_rq);
> + struct sched_entity *curr = cfs_rq->curr;
> + u64 max_slice = 0ULL;
> +
> + if (curr && curr->on_rq)
> + max_slice = curr->slice;
> +
> + if (root)
> + max_slice = max(max_slice, root->max_slice);
> +
> + return max_slice;
> +}
> +
> static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
> {
> return entity_before(__node_2_se(a), __node_2_se(b));
> @@ -853,6 +869,15 @@ static inline void __min_slice_update(st
> }
> }
>
> +static inline void __max_slice_update(struct sched_entity *se, struct rb_node *node)
> +{
> + if (node) {
> + struct sched_entity *rse = __node_2_se(node);
> + if (rse->max_slice > se->max_slice)
> + se->max_slice = rse->max_slice;
> + }
> +}
> +
> /*
> * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
> */
> @@ -860,6 +885,7 @@ static inline bool min_vruntime_update(s
> {
> u64 old_min_vruntime = se->min_vruntime;
> u64 old_min_slice = se->min_slice;
> + u64 old_max_slice = se->max_slice;
> struct rb_node *node = &se->run_node;
>
> se->min_vruntime = se->vruntime;
> @@ -870,8 +896,13 @@ static inline bool min_vruntime_update(s
> __min_slice_update(se, node->rb_right);
> __min_slice_update(se, node->rb_left);
>
> + se->max_slice = se->slice;
> + __max_slice_update(se, node->rb_right);
> + __max_slice_update(se, node->rb_left);
> +
> return se->min_vruntime == old_min_vruntime &&
> - se->min_slice == old_min_slice;
> + se->min_slice == old_min_slice &&
> + se->max_slice == old_max_slice;
> }
>
> RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
>
>
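
For completeness, a self-contained sketch of the invariant the new rbtree
callback maintains: every node caches the largest ->slice in its subtree, so
cfs_rq_max_slice() can read the root in O(1). This is plain recursive C with
made-up names (toy_se, toy_max_slice_update), standing in for the kernel's
RB_DECLARE_CALLBACKS() augmented-rbtree machinery, not a copy of it:

#include <stdio.h>
#include <stdint.h>

struct toy_se {
	uint64_t slice;			/* this entity's request size */
	uint64_t max_slice;		/* max slice over this node's subtree */
	struct toy_se *left, *right;
};

/* recompute node->max_slice from the node itself and its children */
static void toy_max_slice_update(struct toy_se *node)
{
	node->max_slice = node->slice;
	if (node->left && node->left->max_slice > node->max_slice)
		node->max_slice = node->left->max_slice;
	if (node->right && node->right->max_slice > node->max_slice)
		node->max_slice = node->right->max_slice;
}

/* queue-wide maximum: a single read of the cached root value */
static uint64_t toy_queue_max_slice(struct toy_se *root)
{
	return root ? root->max_slice : 0;
}

int main(void)
{
	struct toy_se a    = { .slice =   3000000 };	/* 3 ms   */
	struct toy_se b    = { .slice = 100000000 };	/* 100 ms */
	struct toy_se root = { .slice =    700000, .left = &a, .right = &b };

	/* propagate bottom-up, as the rbtree callbacks do on insert/rotate */
	toy_max_slice_update(&a);
	toy_max_slice_update(&b);
	toy_max_slice_update(&root);

	printf("max slice on the queue: %llu ns\n",
	       (unsigned long long)toy_queue_max_slice(&root));
	return 0;
}

In the patch the same propagation happens in min_vruntime_update(), which the
rbtree core calls bottom-up, so the extra max_slice field costs nothing beyond
the updates already done for min_vruntime and min_slice.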