[PATCH 15/17] [RFC] sched/eevdf: Sleeper bonus
From: Peter Zijlstra
Date: Tue Mar 28 2023 - 07:07:53 EST
Add a sleeper bonus hack, but keep it disabled by default. This should
make it easy to test whether any regressions are due to this.
Specifically, this 'restores' performance for things like starve and
stress-futex, stress-nanosleep that rely on sleeper bonus to compete
against an always running parent (the fair 67%/33% split vs the
50%/50% bonus thing).
OTOH this completely destroys latency and hackbench (as in 5x worse).
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/sched/fair.c | 47 ++++++++++++++++++++++++++++++++++++++++-------
kernel/sched/features.h | 1 +
kernel/sched/sched.h | 3 ++-
3 files changed, 43 insertions(+), 8 deletions(-)
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4819,7 +4819,7 @@ static inline void update_misfit_status(
#endif /* CONFIG_SMP */
static void
-place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
u64 vslice = calc_delta_fair(se->slice, se);
u64 vruntime = avg_vruntime(cfs_rq);
@@ -4878,22 +4878,55 @@ place_entity(struct cfs_rq *cfs_rq, stru
if (WARN_ON_ONCE(!load))
load = 1;
lag = div_s64(lag, load);
+
+ vruntime -= lag;
+ }
+
+ /*
+ * Base the deadline on the 'normal' EEVDF placement policy in an
+ * attempt to not let the bonus crud below wreck things completely.
+ */
+ se->deadline = vruntime;
+
+ /*
+ * The whole 'sleeper' bonus hack... :-/ This is strictly unfair.
+ *
+ * By giving a sleeping task a little boost, it becomes possible for a
+ * 50% task to compete equally with a 100% task. That is, strictly fair
+ * that setup would result in a 67% / 33% split. Sleeper bonus will
+ * change that to 50% / 50%.
+ *
+ * This thing hurts my brain, because tasks leaving with negative lag
+ * will move 'time' backward, so comparing against a historical
+ * se->vruntime is dodgy as heck.
+ */
+ if (sched_feat(PLACE_BONUS) &&
+ (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)) {
+ /*
+ * If se->vruntime is ahead of vruntime, something dodgy
+ * happened and we cannot give bonus due to not having valid
+ * history.
+ */
+ if ((s64)(se->vruntime - vruntime) < 0) {
+ vruntime -= se->slice/2;
+ vruntime = max_vruntime(se->vruntime, vruntime);
+ }
}
- se->vruntime = vruntime - lag;
+ se->vruntime = vruntime;
/*
* When joining the competition; the exisiting tasks will be,
* on average, halfway through their slice, as such start tasks
* off with half a slice to ease into the competition.
*/
- if (sched_feat(PLACE_DEADLINE_INITIAL) && initial)
+ if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
vslice /= 2;
/*
* EEVDF: vd_i = ve_i + r_i/w_i
*/
- se->deadline = se->vruntime + vslice;
+ se->deadline += vslice;
}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -4910,7 +4943,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
* update_curr().
*/
if (curr)
- place_entity(cfs_rq, se, 0);
+ place_entity(cfs_rq, se, flags);
update_curr(cfs_rq);
@@ -4937,7 +4970,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
* we can place the entity.
*/
if (!curr)
- place_entity(cfs_rq, se, 0);
+ place_entity(cfs_rq, se, flags);
account_entity_enqueue(cfs_rq, se);
@@ -11933,7 +11966,7 @@ static void task_fork_fair(struct task_s
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
- place_entity(cfs_rq, se, 1);
+ place_entity(cfs_rq, se, ENQUEUE_INITIAL);
rq_unlock(rq, &rf);
}
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -7,6 +7,7 @@
SCHED_FEAT(PLACE_LAG, true)
SCHED_FEAT(PLACE_FUDGE, true)
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+SCHED_FEAT(PLACE_BONUS, false)
/*
* Prefer to schedule the task we woke last (assuming it failed
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2143,7 +2143,7 @@ extern const u32 sched_prio_to_wmult[40
* ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
* ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
* ENQUEUE_MIGRATED - the task was migrated during wakeup
- *
+ * ENQUEUE_INITIAL - place a new task (fork/clone)
*/
#define DEQUEUE_SLEEP 0x01
@@ -2163,6 +2163,7 @@ extern const u32 sched_prio_to_wmult[40
#else
#define ENQUEUE_MIGRATED 0x00
#endif
+#define ENQUEUE_INITIAL 0x80
#define RETRY_TASK ((void *)-1UL)