[PATCH] sched: sync with the cfs_rq when changing sched class

From: Byungchul Park
Date: Thu Aug 13 2015 - 01:29:52 EST


Currently, a task's load is synced with its cfs_rq only when the task
leaves the fair class. The load also needs to be synced with the
cfs_rq when the task returns to the fair class.

In addition, introduce two helpers, attach_entity_load_avg() and
detach_entity_load_avg(), for attaching/detaching a sched entity's
load to/from its cfs_rq, and call them wherever a sched entity is
attached to or detached from a cfs_rq.
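
With this patch, the attach/detach call sites pair up as:

  enqueue_entity_load_avg() -> attach_entity_load_avg() (when the
                               entity has migrated in)
  switched_from_fair()      -> detach_entity_load_avg()
  switched_to_fair()        -> attach_entity_load_avg()
  task_move_group_fair()    -> detach_entity_load_avg() from the prev
                               cfs_rq, then attach_entity_load_avg()
                               to the new one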

Signed-off-by: Byungchul Park <byungchul.park@xxxxxxx>
---
 kernel/sched/fair.c | 78 +++++++++++++++++++++++++--------------------------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 979ca2c..72d13af 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2709,6 +2709,31 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 		update_tg_load_avg(cfs_rq, 0);
 }
 
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	se->avg.last_update_time = cfs_rq->avg.last_update_time;
+	cfs_rq->avg.load_avg += se->avg.load_avg;
+	cfs_rq->avg.load_sum += se->avg.load_sum;
+	cfs_rq->avg.util_avg += se->avg.util_avg;
+	cfs_rq->avg.util_sum += se->avg.util_sum;
+}
+
+static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+			  &se->avg, se->on_rq * scale_load_down(se->load.weight),
+			  cfs_rq->curr == se, NULL);
+
+	cfs_rq->avg.load_avg =
+		max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
+	cfs_rq->avg.load_sum =
+		max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
+	cfs_rq->avg.util_avg =
+		max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
+	cfs_rq->avg.util_sum =
+		max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+}
+
 /* Add the load generated by se into cfs_rq's load average */
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2717,27 +2742,20 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 now = cfs_rq_clock_task(cfs_rq);
 	int migrated = 0, decayed;
 
-	if (sa->last_update_time == 0) {
-		sa->last_update_time = now;
+	if (sa->last_update_time == 0)
 		migrated = 1;
-	}
-	else {
+	else
 		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-			se->on_rq * scale_load_down(se->load.weight),
-			cfs_rq->curr == se, NULL);
-	}
+				  se->on_rq * scale_load_down(se->load.weight),
+				  cfs_rq->curr == se, NULL);
 
 	decayed = update_cfs_rq_load_avg(now, cfs_rq);
 
 	cfs_rq->runnable_load_avg += sa->load_avg;
 	cfs_rq->runnable_load_sum += sa->load_sum;
 
-	if (migrated) {
-		cfs_rq->avg.load_avg += sa->load_avg;
-		cfs_rq->avg.load_sum += sa->load_sum;
-		cfs_rq->avg.util_avg += sa->util_avg;
-		cfs_rq->avg.util_sum += sa->util_sum;
-	}
+	if (migrated)
+		attach_entity_load_avg(cfs_rq, se);
 
 	if (decayed || migrated)
 		update_tg_load_avg(cfs_rq, 0);
@@ -7911,17 +7929,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 
 #ifdef CONFIG_SMP
 	/* Catch up with the cfs_rq and remove our load when we leave */
-	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
-		se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
-
-	cfs_rq->avg.load_avg =
-		max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-	cfs_rq->avg.load_sum =
-		max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-	cfs_rq->avg.util_avg =
-		max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-	cfs_rq->avg.util_sum =
-		max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+	detach_entity_load_avg(cfs_rq, se);
 #endif
 }
 
@@ -7938,6 +7946,11 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 	 */
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
+
+#ifdef CONFIG_SMP
+	/* synchronize task with its cfs_rq */
+	attach_entity_load_avg(cfs_rq_of(&p->se), &p->se);
+#endif
 	if (!task_on_rq_queued(p))
 		return;
 
@@ -8023,16 +8036,7 @@ static void task_move_group_fair(struct task_struct *p, int queued)
 
 #ifdef CONFIG_SMP
 	/* synchronize task with its prev cfs_rq */
-	if (!queued)
-		__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
-			&se->avg, se->on_rq * scale_load_down(se->load.weight),
-			cfs_rq->curr == se, NULL);
-
-	/* remove our load when we leave */
-	cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-	cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-	cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-	cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+	detach_entity_load_avg(cfs_rq, se);
 #endif
 	set_task_rq(p, task_cpu(p));
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
@@ -8042,11 +8046,7 @@ static void task_move_group_fair(struct task_struct *p, int queued)
 
 #ifdef CONFIG_SMP
 	/* Virtually synchronize task with its new cfs_rq */
-	p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
-	cfs_rq->avg.load_avg += p->se.avg.load_avg;
-	cfs_rq->avg.load_sum += p->se.avg.load_sum;
-	cfs_rq->avg.util_avg += p->se.avg.util_avg;
-	cfs_rq->avg.util_sum += p->se.avg.util_sum;
+	attach_entity_load_avg(cfs_rq, se);
 #endif
 }

--
1.7.9.5
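
A standalone sketch (not kernel code; simplified, hypothetical types)
of why detach_entity_load_avg() clamps each subtraction with max_t():
the entity's stale contribution can exceed the cfs_rq's already-decayed
sums, so the aggregate must never be allowed to go negative:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's sched_avg fields. */
struct avg {
	long load_avg;
	long long load_sum;
};

/* Attach: the entity's contribution is simply added to the aggregate. */
static void attach(struct avg *rq, const struct avg *se)
{
	rq->load_avg += se->load_avg;
	rq->load_sum += se->load_sum;
}

/* Detach: clamp at zero, mirroring the max_t() calls in the patch;
 * the entity's stale contribution may be larger than the rq sums. */
static void detach(struct avg *rq, const struct avg *se)
{
	rq->load_avg = rq->load_avg > se->load_avg ? rq->load_avg - se->load_avg : 0;
	rq->load_sum = rq->load_sum > se->load_sum ? rq->load_sum - se->load_sum : 0;
}

int main(void)
{
	struct avg rq = { .load_avg = 100, .load_sum = 3000 };
	struct avg se = { .load_avg = 120, .load_sum = 2000 }; /* stale, > rq->load_avg */

	detach(&rq, &se);
	printf("after detach: load_avg=%ld load_sum=%lld\n", rq.load_avg, rq.load_sum);
	/* prints: after detach: load_avg=0 load_sum=1000 */

	attach(&rq, &se);
	printf("after attach: load_avg=%ld load_sum=%lld\n", rq.load_avg, rq.load_sum);
	/* prints: after attach: load_avg=120 load_sum=3000 */
	return 0;
}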
