Re: [PATCH v2] sched/fair: sanitize vruntime of entity being migrated

From: Peter Zijlstra
Date: Tue Mar 14 2023 - 08:09:16 EST


On Tue, Mar 14, 2023 at 08:41:30AM +0100, Vincent Guittot wrote:

> I'm going to use something a bit different from your proposal below by
> merging initial and flag:
>
> static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
>                          int flags)
>
> with flags:
> 0 for initial placement
> ENQUEUE_WAKEUP for wakeup
> ENQUEUE_MIGRATED for migrated task
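
IIUC the call sites under that scheme would then look something like the
below -- just a sketch spelling out your list, not tested:

	place_entity(cfs_rq, se, 0);			/* initial placement */
	place_entity(cfs_rq, se, ENQUEUE_WAKEUP);	/* wakeup */
	place_entity(cfs_rq, se, ENQUEUE_WAKEUP |
				 ENQUEUE_MIGRATED);	/* migrated wakeup */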

So when a task has not been running for a long time (our case at hand),
there are two cases:

- it wakes up locally and place_entity() gets to reset vruntime;
- it wakes up remotely and migrate_task_rq_fair() can reset vruntime.

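(From memory, the call chains are roughly:

	try_to_wake_up()			/* local wakeup */
	  -> enqueue_task_fair(ENQUEUE_WAKEUP)
	    -> enqueue_entity() -> place_entity()

	try_to_wake_up()			/* remote wakeup */
	  -> set_task_cpu() -> migrate_task_rq_fair()
	  ...
	  -> enqueue_task_fair(ENQUEUE_WAKEUP | ENQUEUE_MIGRATED)

so ENQUEUE_MIGRATED is only set on the remote path.)
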
So if we can rely on ENQUEUE_MIGRATED to differentiate between these
cases, why wouldn't something like this work?

---

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7a1b1f855b96..a0d00b6a8bc6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4648,11 +4648,31 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #endif
 }
 
+static bool reset_vruntime(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	const s64 limit = 60LL * NSEC_PER_SEC;
+	s64 sleep_time;
+
+	/*
+	 * Pull vruntime of the entity being placed to the base level of
+	 * cfs_rq, to prevent boosting it if placed backwards. If the entity
+	 * slept for a long time, don't even try to compare its vruntime with
+	 * the base as it may be too far off and the comparison may get
+	 * inversed due to s64 overflow.
+	 */
+	sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
+	if (unlikely(sleep_time > limit)) {
+		se->vruntime = cfs_rq->min_vruntime - calc_delta_fair(limit, se);
+		return true;
+	}
+
+	return false;
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
 	u64 vruntime = cfs_rq->min_vruntime;
-	u64 sleep_time;
 
 	/*
 	 * The 'current' period is already promised to the current tasks,
@@ -4682,18 +4702,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		vruntime -= thresh;
 	}
 
-	/*
-	 * Pull vruntime of the entity being placed to the base level of
-	 * cfs_rq, to prevent boosting it if placed backwards. If the entity
-	 * slept for a long time, don't even try to compare its vruntime with
-	 * the base as it may be too far off and the comparison may get
-	 * inversed due to s64 overflow.
-	 */
-	sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
-	if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
-		se->vruntime = vruntime;
-	else
-		se->vruntime = max_vruntime(se->vruntime, vruntime);
+	/* ensure we don't gain time by being placed backwards */
+	se->vruntime = max_vruntime(se->vruntime, vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -4768,8 +4778,11 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	update_cfs_group(se);
 	account_entity_enqueue(cfs_rq, se);
 
-	if (flags & ENQUEUE_WAKEUP)
+	if (flags & ENQUEUE_WAKEUP) {
+		if (!(flags & ENQUEUE_MIGRATED))
+			reset_vruntime(cfs_rq, se);
 		place_entity(cfs_rq, se, 0);
+	}
 
 	check_schedstat_required();
 	update_stats_enqueue_fair(cfs_rq, se, flags);
@@ -7625,6 +7638,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 {
 	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/*
 	 * As blocked tasks retain absolute vruntime the migration needs to
@@ -7632,11 +7646,8 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 	 * min_vruntime -- the latter is done by enqueue_entity() when placing
 	 * the task on the new runqueue.
 	 */
-	if (READ_ONCE(p->__state) == TASK_WAKING) {
-		struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
+	if (READ_ONCE(p->__state) == TASK_WAKING || reset_vruntime(cfs_rq, se))
 		se->vruntime -= u64_u32_load(cfs_rq->min_vruntime);
-	}
 
 	if (!task_on_rq_migrating(p)) {
 		remove_entity_load_avg(se);
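
FWIW, the comparison inversion that comment guards against is easy to
demonstrate in userspace; minimal sketch, with max_vruntime() mirroring
the kernel's and the vruntime values made up to trigger the wrap:

#include <stdio.h>
#include <stdint.h>

/* Mirrors max_vruntime() from kernel/sched/fair.c: the signed delta
 * copes with u64 wraparound of vruntime itself, but silently inverts
 * once the two values are more than 2^63 ns apart. */
static uint64_t max_vruntime(uint64_t max_vr, uint64_t vr)
{
	int64_t delta = (int64_t)(vr - max_vr);

	if (delta > 0)
		max_vr = vr;

	return max_vr;
}

int main(void)
{
	uint64_t se_vruntime = 1000;	/* stale vruntime of the sleeper */
	uint64_t min_vruntime = se_vruntime + (1ULL << 63) + 1;

	/* Expect min_vruntime to win; the s64 overflow makes the stale,
	 * far-behind se->vruntime win instead, boosting the task. */
	printf("picked %llu, expected %llu\n",
	       (unsigned long long)max_vruntime(se_vruntime, min_vruntime),
	       (unsigned long long)min_vruntime);
	return 0;
}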