[PATCH 16/17] [RFC] sched/eevdf: Minimal vavg option
From: Peter Zijlstra
Date: Tue Mar 28 2023 - 07:08:05 EST
An alternative means of tracking min_vruntime to minimize the deltas
going into avg_vruntime -- note that because vavg can move backwards
this is all sorts of tricky.

It is also more expensive because of the extra divisions... I have not
found this approach convincing.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/sched/fair.c | 51 ++++++++++++++++++++++++++++--------------------
kernel/sched/features.h | 2 +
2 files changed, 32 insertions(+), 21 deletions(-)
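
As a reference for the arithmetic in question, below is a rough
userspace sketch (illustrative only, not kernel code; the toy_* names
are made up, but they are modelled on the avg_vruntime() /
avg_vruntime_update() helpers introduced earlier in this series and on
the MINIMAL_VA branch in the hunk that follows). It shows why rebasing
min_vruntime onto the weighted average is cheap on the sum side but
costs a division every time the average itself is computed:

/*
 * Illustrative sketch. Entities keep vruntime relative to a reference
 * v0 (min_vruntime); the weighted zero-lag point is
 *
 *	V = v0 + \Sum w_i*(v_i - v0) / \Sum w_i
 *
 * Moving v0 by delta only requires adjusting the weighted sum by
 * -delta * \Sum w_i, but evaluating V costs a division each time.
 */
#include <stdio.h>
#include <stdint.h>

struct toy_cfs_rq {
	int64_t  avg_vruntime;	/* \Sum w_i * (v_i - v0) */
	uint64_t avg_load;	/* \Sum w_i */
	uint64_t min_vruntime;	/* v0 */
};

/* weighted average vruntime -- one division per call */
static uint64_t toy_avg_vruntime(struct toy_cfs_rq *rq)
{
	int64_t avg = rq->avg_vruntime;

	if (rq->avg_load)
		avg /= (int64_t)rq->avg_load;

	return rq->min_vruntime + avg;
}

/* shifting v0 by delta keeps V invariant: v' = v + d => sum' = sum - d*W */
static void toy_avg_vruntime_update(struct toy_cfs_rq *rq, int64_t delta)
{
	rq->avg_vruntime -= (int64_t)rq->avg_load * delta;
}

int main(void)
{
	struct toy_cfs_rq rq = {
		.avg_vruntime	= 3000,
		.avg_load	= 3,
		.min_vruntime	= 100,
	};
	uint64_t V = toy_avg_vruntime(&rq);	/* 100 + 3000/3 = 1100 */

	/* what the MINIMAL_VA branch does: pull min_vruntime up to V */
	toy_avg_vruntime_update(&rq, (int64_t)(V - rq.min_vruntime));
	rq.min_vruntime = V;

	printf("V before/after rebase: %llu %llu\n",
	       (unsigned long long)V,
	       (unsigned long long)toy_avg_vruntime(&rq));
	return 0;
}

Both prints give 1100: the rebase is exact, but note the extra
toy_avg_vruntime() call (and hence division) per update, which is the
cost referred to above.
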
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -732,28 +732,37 @@ static u64 __update_min_vruntime(struct
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
- struct sched_entity *se = __pick_first_entity(cfs_rq);
- struct sched_entity *curr = cfs_rq->curr;
-
- u64 vruntime = cfs_rq->min_vruntime;
-
- if (curr) {
- if (curr->on_rq)
- vruntime = curr->vruntime;
- else
- curr = NULL;
+ if (sched_feat(MINIMAL_VA)) {
+ u64 vruntime = avg_vruntime(cfs_rq);
+ s64 delta = (s64)(vruntime - cfs_rq->min_vruntime);
+
+ avg_vruntime_update(cfs_rq, delta);
+
+ u64_u32_store(cfs_rq->min_vruntime, vruntime);
+ } else {
+ struct sched_entity *se = __pick_first_entity(cfs_rq);
+ struct sched_entity *curr = cfs_rq->curr;
+
+ u64 vruntime = cfs_rq->min_vruntime;
+
+ if (curr) {
+ if (curr->on_rq)
+ vruntime = curr->vruntime;
+ else
+ curr = NULL;
+ }
+
+ if (se) {
+ if (!curr)
+ vruntime = se->vruntime;
+ else
+ vruntime = min_vruntime(vruntime, se->vruntime);
+ }
+
+ /* ensure we never gain time by being placed backwards. */
+ u64_u32_store(cfs_rq->min_vruntime,
+ __update_min_vruntime(cfs_rq, vruntime));
}
-
- if (se) {
- if (!curr)
- vruntime = se->vruntime;
- else
- vruntime = min_vruntime(vruntime, se->vruntime);
- }
-
- /* ensure we never gain time by being placed backwards. */
- u64_u32_store(cfs_rq->min_vruntime,
- __update_min_vruntime(cfs_rq, vruntime));
}
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -9,6 +9,8 @@ SCHED_FEAT(PLACE_FUDGE, true)
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
SCHED_FEAT(PLACE_BONUS, false)
+SCHED_FEAT(MINIMAL_VA, false)
+
/*
* Prefer to schedule the task we woke last (assuming it failed
* wakeup-preemption), since its likely going to consume data we
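
For completeness: assuming the usual CONFIG_SCHED_DEBUG plumbing behind
sched_feat(), the knob added above would be toggled at runtime through
debugfs, along the lines of:

	echo MINIMAL_VA > /sys/kernel/debug/sched/features
	echo NO_MINIMAL_VA > /sys/kernel/debug/sched/features

(the exact path depends on the kernel version; older kernels expose it
as /sys/kernel/debug/sched_features.)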