Re: [PATCH v3 4/5] sched/fair: Move load and util avgs from wake_up_new_task() to sched_fork()

From: Vincent Guittot
Date: Wed Jun 01 2016 - 08:24:55 EST


On 1 June 2016 at 05:41, Yuyang Du <yuyang.du@xxxxxxxxx> wrote:
> Move new-task initialization to sched_fork(). For a task that starts in
> a non-fair class, the first switched_to_fair() will do the attach
> correctly.

I'm not sure I follow the explanation: you have only moved and renamed
init_entity_runnable_average(), but the changelog speaks about tasks
that start in a non-fair class.
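
For reference, this is the path I understand "the first
switched_to_fair() will do the attach" to refer to, as I read the
current kernel/sched/fair.c (abbreviated, group/vruntime details
elided):

static void attach_task_cfs_rq(struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        /* ... depth handling elided ... */

        /* Synchronize task with its cfs_rq */
        attach_entity_load_avg(cfs_rq, se);

        /* ... vruntime handling elided ... */
}

static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
        attach_task_cfs_rq(p);
        /* ... */
}

If the point is that a task forked into the rt or deadline class still
gets sane initial averages in sched_fork(), and those only get attached
to a cfs_rq when the task first switches to the fair class, it would
help to spell that out in the changelog.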
>
> Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> Signed-off-by: Yuyang Du <yuyang.du@xxxxxxxxx>
> ---
>  kernel/sched/core.c  |  5 +++--
>  kernel/sched/fair.c  | 14 +++++---------
>  kernel/sched/sched.h |  2 +-
>  3 files changed, 9 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index ae5b8a8..77a8a2b 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -2370,6 +2370,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
>          if (p->sched_class->task_fork)
>                  p->sched_class->task_fork(p);
>
> +        /* Initialize new task's sched averages */
> +        init_entity_sched_avg(&p->se);
> +
>          /*
>           * The child is not yet in the pid-hash so no cgroup attach races,
>           * and the cgroup is pinned to this child due to cgroup_fork()
> @@ -2510,8 +2513,6 @@ void wake_up_new_task(struct task_struct *p)
>          struct rq_flags rf;
>          struct rq *rq;
>
> -        /* Initialize new task's runnable average */
> -        init_entity_runnable_average(&p->se);
>          raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
>  #ifdef CONFIG_SMP
>          /*
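
To make the move explicit, the resulting ordering on the fork path, as
I read kernel/sched/core.c after this patch (heavily abbreviated):

        copy_process()
            sched_fork()
                p->sched_class->task_fork(p)   /* task_fork_fair() for fair tasks */
                init_entity_sched_avg(&p->se)  /* previously in wake_up_new_task() */
        ...
        wake_up_new_task()
            select_task_rq(), activate_task()  /* averages must be sane by now */

so the averages are set up before the first select_task_rq(), which
looks fine to me as far as the mechanics of the move go.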
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 0b4914d..eb9041c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -668,8 +668,8 @@ static unsigned long task_h_load(struct task_struct *p);
>  #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
>  #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
>
> -/* Give new sched_entity start runnable values to heavy its load in infant time */
> -void init_entity_runnable_average(struct sched_entity *se)
> +/* Give a new sched_entity initial load values so its load is heavy in its infant period */
> +void init_entity_sched_avg(struct sched_entity *se)
>  {
>          struct sched_avg *sa = &se->avg;
>
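
As an aside, the two constants in the context above can be
sanity-checked with a quick back-of-envelope calculation (my own
sketch, not kernel code): the load average decays by y = 2^(-1/32) per
1024us period, so the saturated sum of 1024 * y^n is 1024 / (1 - y):

#include <math.h>
#include <stdio.h>

int main(void)
{
        /* Per-period decay factor: contributions halve every 32 periods */
        double y = pow(2.0, -1.0 / 32.0);

        /* Closed form of sum_{n>=0} 1024 * y^n */
        printf("saturated load avg ~= %.1f\n", 1024.0 / (1.0 - y));
        return 0;
}

This prints roughly 47788; the kernel's fixed-point decay tables
truncate a little on each step, which is how the integer series
saturates at 47742 (LOAD_AVG_MAX) after about 345 full periods
(LOAD_AVG_MAX_N).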
> @@ -738,12 +738,8 @@ void post_init_entity_util_avg(struct sched_entity *se)
>  static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
>  static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
>  #else
> -void init_entity_runnable_average(struct sched_entity *se)
> -{
> -}
> -void post_init_entity_util_avg(struct sched_entity *se)
> -{
> -}
> +void init_entity_sched_avg(struct sched_entity *se) { }
> +void post_init_entity_util_avg(struct sched_entity *se) { }
>  #endif
>
>  /*
> @@ -8520,7 +8516,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
>
>                  init_cfs_rq(cfs_rq);
>                  init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
> -                init_entity_runnable_average(se);
> +                init_entity_sched_avg(se);
>                  post_init_entity_util_avg(se);
>          }
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index c139ec4..bc9c99e 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1321,7 +1321,7 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
>
>  unsigned long to_ratio(u64 period, u64 runtime);
>
> -extern void init_entity_runnable_average(struct sched_entity *se);
> +extern void init_entity_sched_avg(struct sched_entity *se);
>  extern void post_init_entity_util_avg(struct sched_entity *se);
>
>  #ifdef CONFIG_NO_HZ_FULL
> --
> 1.7.9.5
>