[PATCH 8/8] sched/fair: delete superfluous SKIP_AGE_LOAD

From: Chengming Zhou
Date: Sat Jul 09 2022 - 11:16:19 EST


All three cases that use attach_entity_cfs_rq():

1. task migrated to another CPU
2. task migrated to another cgroup
3. task switched to fair class

have the entity's sched_avg last_update_time already reset to 0 by the
time attach_entity_cfs_rq() -> update_load_avg() runs, so it makes no
difference whether SKIP_AGE_LOAD is set or not.
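
With SKIP_AGE_LOAD gone, the aging check in update_load_avg() reduces
to (see the first hunk below):

	if (se->avg.last_update_time)
		__update_load_avg_se(now, cfs_rq, se);

which already skips the aging on every attach path, since
last_update_time is 0 there.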

Delete the superfluous SKIP_AGE_LOAD flag, together with the now-unused
ATTACH_AGE_LOAD feature.

Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
kernel/sched/fair.c | 18 ++++++------------
kernel/sched/features.h | 1 -
2 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b0bde895ba96..b91643a2143e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3956,9 +3956,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* Optional action to be done while updating the load average
*/
#define UPDATE_TG 0x1
-#define SKIP_AGE_LOAD 0x2
-#define DO_ATTACH 0x4
-#define DO_DETACH 0x8
+#define DO_ATTACH 0x2
+#define DO_DETACH 0x4

/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -3970,7 +3969,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
* Track task load average for carrying it to new CPU after migrated, and
* track group sched_entity load average for task_h_load calc in migration
*/
- if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
+ if (se->avg.last_update_time)
__update_load_avg_se(now, cfs_rq, se);

decayed = update_cfs_rq_load_avg(now, cfs_rq);
@@ -4253,7 +4252,6 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
}

#define UPDATE_TG 0x0
-#define SKIP_AGE_LOAD 0x0
#define DO_ATTACH 0x0
#define DO_DETACH 0x0

@@ -11484,9 +11482,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
struct cfs_rq *cfs_rq = cfs_rq_of(se);

/* Catch up with the cfs_rq and remove our load when we leave */
- update_load_avg(cfs_rq, se, 0);
- detach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq);
+ update_load_avg(cfs_rq, se, UPDATE_TG | DO_DETACH);
propagate_entity_cfs_rq(se);
}

@@ -11494,10 +11490,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);

- /* Synchronize entity with its cfs_rq */
- update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
- attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq);
+ /* Synchronize entity with its cfs_rq and attach our load */
+ update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
propagate_entity_cfs_rq(se);
}

diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ee7f23c76bd3..fb92431d496f 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -85,7 +85,6 @@ SCHED_FEAT(RT_PUSH_IPI, true)

SCHED_FEAT(RT_RUNTIME_SHARE, false)
SCHED_FEAT(LB_MIN, false)
-SCHED_FEAT(ATTACH_AGE_LOAD, true)

SCHED_FEAT(WA_IDLE, true)
SCHED_FEAT(WA_WEIGHT, true)
--
2.36.1