[PATCH] sched/fair: add helper functions for util_avg and runnable_avg calculation
From: Hui Su
Date: Thu Apr 22 2021 - 05:14:50 EST
Add helper functions for the util_avg and runnable_avg calculations
performed when an entity is enqueued or dequeued. No functional change.
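
For context, the dequeue-side helpers rely on sub_positive(), which
clamps at zero instead of letting the unsigned averages wrap on
underflow. A minimal userspace sketch of that behaviour (an
illustration only, not the kernel's exact type-generic macro):

#include <stdio.h>

/*
 * Sketch of the clamp-at-zero subtraction in the spirit of
 * sub_positive() in kernel/sched/fair.c: never let a PELT average
 * wrap below zero when an entity's contribution is removed.
 */
static void sub_positive_sketch(unsigned long *ptr, unsigned long val)
{
	unsigned long res = *ptr - val;

	if (res > *ptr)		/* wrapped around: clamp to zero */
		res = 0;
	*ptr = res;
}

int main(void)
{
	unsigned long util_avg = 100;

	sub_positive_sketch(&util_avg, 30);	/* 100 - 30 = 70 */
	sub_positive_sketch(&util_avg, 90);	/* would underflow; clamps to 0 */
	printf("util_avg = %lu\n", util_avg);	/* prints 0 */
	return 0;
}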
Without this change:

size vmlinux
    text    data     bss      dec     hex filename
19889268 6632812 2429160 28951240 1b9c2c8 vmlinux

size kernel/sched/fair.o
 text  data  bss   dec  hex filename
40044  1569   96 41709 a2ed kernel/sched/fair.o
With this change:

size vmlinux
    text    data     bss      dec     hex filename
19889268 6632812 2429160 28951240 1b9c2c8 vmlinux

size kernel/sched/fair.o
 text  data  bss   dec  hex filename
40044  1569   96 41709 a2ed kernel/sched/fair.o

The sizes of vmlinux and fair.o are unchanged: the helpers are all
static inline, so the compiler emits the same code as before.
Signed-off-by: Hui Su <suhui@xxxxxxxx>
---
kernel/sched/fair.c | 48 +++++++++++++++++++++++++++++++++++++--------
1 file changed, 40 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 794c2cb945f8..bb8777d98ad9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3068,11 +3068,47 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
}
+
+static inline void
+enqueue_util_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->avg.util_avg += se->avg.util_avg;
+	cfs_rq->avg.util_sum += se->avg.util_sum;
+}
+
+static inline void
+dequeue_util_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+}
+
+static inline void
+enqueue_runnable_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
+	cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
+}
+
+static inline void
+dequeue_runnable_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
+	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+}
#else
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+enqueue_util_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+dequeue_util_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+enqueue_runnable_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+dequeue_runnable_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
@@ -3729,10 +3765,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
}
enqueue_load_avg(cfs_rq, se);
-	cfs_rq->avg.util_avg += se->avg.util_avg;
-	cfs_rq->avg.util_sum += se->avg.util_sum;
-	cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
-	cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
+	enqueue_util_avg(cfs_rq, se);
+	enqueue_runnable_avg(cfs_rq, se);
add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
@@ -3752,10 +3786,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
dequeue_load_avg(cfs_rq, se);
-	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
-	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
-	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+	dequeue_util_avg(cfs_rq, se);
+	dequeue_runnable_avg(cfs_rq, se);
add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
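
As a side note, each enqueue/dequeue pair above is an exact inverse as
long as no PELT decay happens in between. A minimal sketch of that
symmetry, using a hypothetical stand-in for the util fields of the
kernel's struct sched_avg (not the real structs):

#include <assert.h>

/* Hypothetical stand-in for the util fields of struct sched_avg. */
struct avg {
	unsigned long util_avg;
	unsigned long util_sum;
};

static void enqueue_util_avg_sketch(struct avg *rq, const struct avg *se)
{
	rq->util_avg += se->util_avg;
	rq->util_sum += se->util_sum;
}

static void dequeue_util_avg_sketch(struct avg *rq, const struct avg *se)
{
	/* clamp at zero, mirroring what sub_positive() does in fair.c */
	rq->util_avg = rq->util_avg >= se->util_avg ? rq->util_avg - se->util_avg : 0;
	rq->util_sum = rq->util_sum >= se->util_sum ? rq->util_sum - se->util_sum : 0;
}

int main(void)
{
	struct avg rq = { .util_avg = 512, .util_sum = 24000 };
	struct avg se = { .util_avg = 128, .util_sum = 6000 };

	enqueue_util_avg_sketch(&rq, &se);
	dequeue_util_avg_sketch(&rq, &se);

	/* With no decay in between, the pair round-trips exactly. */
	assert(rq.util_avg == 512 && rq.util_sum == 24000);
	return 0;
}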
--
2.25.1