Re: [PATCH 19/24] sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE
From: Peter Zijlstra
Date: Wed Aug 14 2024 - 03:28:34 EST
On Wed, Aug 14, 2024 at 12:18:07AM +0200, Peter Zijlstra wrote:
> On Tue, Aug 13, 2024 at 02:43:56PM +0200, Valentin Schneider wrote:
> > On 27/07/24 12:27, Peter Zijlstra wrote:
> > > Note that tasks that are kept on the runqueue to burn off negative
> > > lag, are not in fact runnable anymore, they'll get dequeued the moment
> > > they get picked.
> > >
> > > As such, don't count this time towards runnable.
> > >
> > > Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
> > > ---
> > > kernel/sched/fair.c  | 2 ++
> > > kernel/sched/sched.h | 6 ++++++
> > > 2 files changed, 8 insertions(+)
> > >
> > > --- a/kernel/sched/fair.c
> > > +++ b/kernel/sched/fair.c
> > > @@ -5388,6 +5388,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
> > >  		if (cfs_rq->next == se)
> > >  			cfs_rq->next = NULL;
> > >  		se->sched_delayed = 1;
> > > +		update_load_avg(cfs_rq, se, 0);
> >
> > Shouldn't this be before setting ->sched_delayed? accumulate_sum() should
> > see the time delta as spent being runnable.
> >
> > >  			return false;
> > >  		}
> > >  	}
> > > @@ -6814,6 +6815,7 @@ requeue_delayed_entity(struct sched_enti
> > >  	}
> > >
> > >  	se->sched_delayed = 0;
> > > +	update_load_avg(cfs_rq, se, 0);
> >
> > Ditto on the ordering
>
> Bah, so I remember thinking about it and then I obviously go and do it
> the exact wrong way around eh? Let me double check this tomorrow morning
> with the brain slightly more awake :/
OK, so I went over it again and I ended up with the below diff -- which,
assuming I didn't make a giant mess of things *again*, I should go fold
back into various other patches ...
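
The invariant, spelled out: update_load_avg() has to close out the
elapsed window while ->sched_delayed still holds its old value, because
the PELT update samples se_runnable() over that whole window. A minimal
sketch of the ordering -- the se_runnable() shape below is assumed from
the sched.h hunk of the original patch, which makes delayed entities
count as not runnable:

	/* assumed shape of the sched.h hunk from the original patch */
	static inline long se_runnable(struct sched_entity *se)
	{
		if (se->sched_delayed)
			return false;

		return !!se->on_rq;
	}

	/* entering the delayed state: the delta up to now was runnable */
	update_load_avg(cfs_rq, se, 0);	/* accrues with ->sched_delayed == 0 */
	se->sched_delayed = 1;

	/* leaving the delayed state: the delta up to now was not runnable */
	update_load_avg(cfs_rq, se, 0);	/* accrues with ->sched_delayed == 1 */
	se->sched_delayed = 0;

That way accumulate_sum() sees the runnable contribution exactly up to
the transition, per Valentin's point above.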
---
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1b15dbfb1ce5..fa8907f2c716 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5461,14 +5461,10 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	bool sleep = flags & DEQUEUE_SLEEP;
 
+	update_curr(cfs_rq);
+
 	if (flags & DEQUEUE_DELAYED) {
-		/*
-		 * DEQUEUE_DELAYED is typically called from pick_next_entity()
-		 * at which point we've already done update_curr() and do not
-		 * want to do so again.
-		 */
 		SCHED_WARN_ON(!se->sched_delayed);
-		se->sched_delayed = 0;
 	} else {
 		bool delay = sleep;
 		/*
@@ -5479,14 +5475,13 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 			delay = false;
 
 		SCHED_WARN_ON(delay && se->sched_delayed);
 
-		update_curr(cfs_rq);
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
 			if (cfs_rq->next == se)
 				cfs_rq->next = NULL;
-			se->sched_delayed = 1;
 			update_load_avg(cfs_rq, se, 0);
+			se->sched_delayed = 1;
 			return false;
 		}
 	}
@@ -5536,6 +5531,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 		update_min_vruntime(cfs_rq);
 
+	if (flags & DEQUEUE_DELAYED) {
+		se->sched_delayed = 0;
+		if (sched_feat(DELAY_ZERO) && se->vlag > 0)
+			se->vlag = 0;
+	}
+
 	if (cfs_rq->nr_running == 0)
 		update_idle_cfs_rq_clock_pelt(cfs_rq);
 
@@ -5611,11 +5612,6 @@ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
 	struct sched_entity *se = pick_eevdf(cfs_rq);
 	if (se->sched_delayed) {
 		dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
-		SCHED_WARN_ON(se->sched_delayed);
-		SCHED_WARN_ON(se->on_rq);
-		if (sched_feat(DELAY_ZERO) && se->vlag > 0)
-			se->vlag = 0;
-
 		return NULL;
 	}
 	return se;
@@ -6906,7 +6902,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/*
-	 * se->sched_delayed should imply both: se->on_rq == 1.
+	 * se->sched_delayed should imply: se->on_rq == 1.
 	 * Because a delayed entity is one that is still on
 	 * the runqueue competing until elegibility.
 	 */
@@ -6927,8 +6923,8 @@ requeue_delayed_entity(struct sched_entity *se)
 		}
 	}
 
-	se->sched_delayed = 0;
 	update_load_avg(cfs_rq, se, 0);
+	se->sched_delayed = 0;
 }
 
 /*