Re: [PATCH 1/2] sched/fair: keep load_avg and load_sum synced
From: Peter Zijlstra
Date: Thu May 27 2021 - 09:06:54 EST
On Thu, May 27, 2021 at 02:29:15PM +0200, Vincent Guittot wrote:
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3509,7 +3509,8 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
>  	se->avg.load_sum = runnable_sum;
>  	se->avg.load_avg = load_avg;
>  	add_positive(&cfs_rq->avg.load_avg, delta_avg);
> -	add_positive(&cfs_rq->avg.load_sum, delta_sum);
> +	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
If I'm not mistaken, this makes delta_sum unused, and delta_avg is then
only used once, so we can remove both entirely; see below.
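For reference, since it is the whole reason the delta dance existed in
the first place: add_positive() is the clamp-on-underflow helper in
kernel/sched/fair.c, roughly (kernel comment trimmed):

#define add_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(_val) val = (_val);				\
	typeof(*ptr) res, var = READ_ONCE(*ptr);		\
								\
	res = var + val;					\
								\
	/* val was negative and the unsigned add wrapped */	\
	if (val < 0 && res > var)				\
		res = 0;					\
								\
	WRITE_ONCE(*ptr, res);					\
} while (0)

The load_avg side still wants that clamping; the load_sum side no
longer does, since it is simply recomputed from load_avg.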
> +
This extra blank line, do we really need it? :-)
> }
---
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 161b92aa1c79..2b99e687fe7a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3453,10 +3453,9 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ long running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long load_avg;
u64 load_sum = 0;
- s64 delta_sum;
u32 divider;
if (!runnable_sum)
@@ -3503,13 +3502,11 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 	load_sum = (s64)se_weight(se) * runnable_sum;
 	load_avg = div_s64(load_sum, divider);
 
-	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
-	delta_avg = load_avg - se->avg.load_avg;
+	add_positive(&cfs_rq->avg.load_avg, (long)(load_avg - se->avg.load_avg));
 
 	se->avg.load_sum = runnable_sum;
 	se->avg.load_avg = load_avg;
-	add_positive(&cfs_rq->avg.load_avg, delta_avg);
-	add_positive(&cfs_rq->avg.load_sum, delta_sum);
+	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
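Also, maybe worth a comment somewhere: the reason writing

	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;

is enough to keep the two synced is that divider comes from
get_pelt_divider(), which is (roughly, from kernel/sched/pelt.h as of
this series):

static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}

so load_avg == load_sum / divider by construction, and recomputing
load_sum from load_avg re-establishes exactly that relation instead of
letting two independently maintained deltas drift apart.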