[RFC][PATCH 1/3] sched: rename sched_entity to sched_cfs_entity.

From: Dario Faggioli
Date: Tue Jan 04 2011 - 11:01:15 EST


Using the name `struct sched_entity' for the scheduling entity
of the fair scheduling class is not fair to the other scheduling
classes, which have to use things like `struct sched_rt_entity'.

Moreover, changing that makes it possible to put all the
`struct sched_*_entity' of all the scheduling classes into a more
general structure (which will also contain all the common fields).
This would make the code easier to understand and, if we manage
to use a union for the class-specific fields, it would also save
some memory.
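
Just to give an idea of where this is heading (purely illustrative,
not part of this patch; the structure name and field layout below are
assumptions, not code from this series), the end result could look
something like:

struct sched_entity {
	/* fields common to all scheduling classes */
	struct load_weight	load;
	unsigned int		on_rq;
	u64			exec_start;
	u64			sum_exec_runtime;

	/* class-specific parts could then share storage */
	union {
		struct sched_cfs_entity	cfs;
		struct sched_rt_entity	rt;
	};
};

With such a layout, task_struct would embed a single entity and each
task would only pay for the fields of the class it actually uses.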

Therefore, this commit renames `sched_entity' to `sched_cfs_entity'
wherever it is used.

Signed-off-by: Dario Faggioli <raistlin@xxxxxxxx>
---
fs/proc/base.c | 2 +-
include/linux/init_task.h | 4 +-
include/linux/sched.h | 6 +-
kernel/delayacct.c | 2 +-
kernel/exit.c | 2 +-
kernel/posix-cpu-timers.c | 10 +-
kernel/sched.c | 152 ++++++------
kernel/sched_debug.c | 117 +++++-----
kernel/sched_fair.c | 611 +++++++++++++++++++++++----------------------
kernel/sched_rt.c | 26 +-
kernel/sched_stoptask.c | 2 +-
11 files changed, 479 insertions(+), 455 deletions(-)

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 08cba2c..9591d6e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -356,7 +356,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
static int proc_pid_schedstat(struct task_struct *task, char *buffer)
{
return sprintf(buffer, "%llu %llu %lu\n",
- (unsigned long long)task->se.sum_exec_runtime,
+ (unsigned long long)task->cfs.sum_exec_runtime,
(unsigned long long)task->sched_info.run_delay,
task->sched_info.pcount);
}
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index caa151f..8baee0b 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -142,8 +142,8 @@ extern struct cred init_cred;
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
- .se = { \
- .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
+ .cfs = { \
+ .group_node = LIST_HEAD_INIT(tsk.cfs.group_node), \
}, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777cd01..55adf37 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1137,7 +1137,7 @@ struct sched_statistics {
};
#endif

-struct sched_entity {
+struct sched_cfs_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node;
struct list_head group_node;
@@ -1155,7 +1155,7 @@ struct sched_entity {
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
- struct sched_entity *parent;
+ struct sched_cfs_entity *parent;
/* rq on which this entity is (to be) queued: */
struct cfs_rq *cfs_rq;
/* rq "owned" by this entity/group: */
@@ -1206,7 +1206,7 @@ struct task_struct {
int prio, static_prio, normal_prio;
unsigned int rt_priority;
const struct sched_class *sched_class;
- struct sched_entity se;
+ struct sched_cfs_entity cfs;
struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index ead9b61..f314f56 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -128,7 +128,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
*/
t1 = tsk->sched_info.pcount;
t2 = tsk->sched_info.run_delay;
- t3 = tsk->se.sum_exec_runtime;
+ t3 = tsk->cfs.sum_exec_runtime;

d->cpu_count += t1;

diff --git a/kernel/exit.c b/kernel/exit.c
index 676149a..01087fc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -132,7 +132,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
+ sig->sum_sched_runtime += tsk->cfs.sum_exec_runtime;
}

sig->nr_threads--;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 05bb717..801169c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -248,7 +248,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
do {
times->utime = cputime_add(times->utime, t->utime);
times->stime = cputime_add(times->stime, t->stime);
- times->sum_exec_runtime += t->se.sum_exec_runtime;
+ times->sum_exec_runtime += t->cfs.sum_exec_runtime;
} while_each_thread(tsk, t);
out:
rcu_read_unlock();
@@ -508,7 +508,7 @@ static void cleanup_timers(struct list_head *head,
void posix_cpu_timers_exit(struct task_struct *tsk)
{
cleanup_timers(tsk->cpu_timers,
- tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
+ tsk->utime, tsk->stime, tsk->cfs.sum_exec_runtime);

}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
@@ -518,7 +518,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
cleanup_timers(tsk->signal->cpu_timers,
cputime_add(tsk->utime, sig->utime),
cputime_add(tsk->stime, sig->stime),
- tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
+ tsk->cfs.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -949,7 +949,7 @@ static void check_thread_timers(struct task_struct *tsk,
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
- if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
+ if (!--maxfire || tsk->cfs.sum_exec_runtime < t->expires.sched) {
tsk->cputime_expires.sched_exp = t->expires.sched;
break;
}
@@ -1276,7 +1276,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
struct task_cputime task_sample = {
.utime = tsk->utime,
.stime = tsk->stime,
- .sum_exec_runtime = tsk->se.sum_exec_runtime
+ .sum_exec_runtime = tsk->cfs.sum_exec_runtime
};

if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
diff --git a/kernel/sched.c b/kernel/sched.c
index 5ec0615..5d68fb0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -251,7 +251,7 @@ struct task_group {

#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
- struct sched_entity **se;
+ struct sched_cfs_entity **cfs_se;
/* runqueue "owned" by this group on each cpu */
struct cfs_rq **cfs_rq;
unsigned long shares;
@@ -326,7 +326,7 @@ struct cfs_rq {
* 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running).
*/
- struct sched_entity *curr, *next, *last;
+ struct sched_cfs_entity *curr, *next, *last;

unsigned int nr_spread_over;

@@ -622,8 +622,8 @@ static inline struct task_group *task_group(struct task_struct *p)
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
- p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
- p->se.parent = task_group(p)->se[cpu];
+ p->cfs.cfs_rq = task_group(p)->cfs_rq[cpu];
+ p->cfs.parent = task_group(p)->cfs_se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
@@ -1555,7 +1555,7 @@ static int tg_load_down(struct task_group *tg, void *data)
load = cpu_rq(cpu)->load.weight;
} else {
load = tg->parent->cfs_rq[cpu]->h_load;
- load *= tg->se[cpu]->load.weight;
+ load *= tg->cfs_se[cpu]->load.weight;
load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
}

@@ -1733,13 +1733,13 @@ static void set_load_weight(struct task_struct *p)
* SCHED_IDLE tasks get minimal weight:
*/
if (p->policy == SCHED_IDLE) {
- p->se.load.weight = WEIGHT_IDLEPRIO;
- p->se.load.inv_weight = WMULT_IDLEPRIO;
+ p->cfs.load.weight = WEIGHT_IDLEPRIO;
+ p->cfs.load.inv_weight = WMULT_IDLEPRIO;
return;
}

- p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
- p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
+ p->cfs.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
+ p->cfs.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1747,7 +1747,7 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
update_rq_clock(rq);
sched_info_queued(p);
p->sched_class->enqueue_task(rq, p, flags);
- p->se.on_rq = 1;
+ p->cfs.on_rq = 1;
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1755,7 +1755,7 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
update_rq_clock(rq);
sched_info_dequeued(p);
p->sched_class->dequeue_task(rq, p, flags);
- p->se.on_rq = 0;
+ p->cfs.on_rq = 0;
}

/*
@@ -2058,7 +2058,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back to back clock update.
*/
- if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+ if (rq->curr->cfs.on_rq && test_tsk_need_resched(rq->curr))
rq->skip_clock_update = 1;
}

@@ -2081,8 +2081,8 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
* Buddy candidates are cache hot:
*/
if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
- (&p->se == cfs_rq_of(&p->se)->next ||
- &p->se == cfs_rq_of(&p->se)->last))
+ (&p->cfs == cfs_rq_of(&p->cfs)->next ||
+ &p->cfs == cfs_rq_of(&p->cfs)->last))
return 1;

if (sysctl_sched_migration_cost == -1)
@@ -2090,7 +2090,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
if (sysctl_sched_migration_cost == 0)
return 0;

- delta = now - p->se.exec_start;
+ delta = now - p->cfs.exec_start;

return delta < (s64)sysctl_sched_migration_cost;
}
@@ -2109,7 +2109,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);

if (task_cpu(p) != new_cpu) {
- p->se.nr_migrations++;
+ p->cfs.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
}

@@ -2133,7 +2133,7 @@ static bool migrate_task(struct task_struct *p, struct rq *rq)
* If the task is not on a runqueue (and not running), then
* the next wake-up will properly place the task.
*/
- return p->se.on_rq || task_running(rq, p);
+ return p->cfs.on_rq || task_running(rq, p);
}

/*
@@ -2193,7 +2193,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
rq = task_rq_lock(p, &flags);
trace_sched_wait_task(p);
running = task_running(rq, p);
- on_rq = p->se.on_rq;
+ on_rq = p->cfs.on_rq;
ncsw = 0;
if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -2358,15 +2358,15 @@ static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
bool is_sync, bool is_migrate, bool is_local,
unsigned long en_flags)
{
- schedstat_inc(p, se.statistics.nr_wakeups);
+ schedstat_inc(p, cfs.statistics.nr_wakeups);
if (is_sync)
- schedstat_inc(p, se.statistics.nr_wakeups_sync);
+ schedstat_inc(p, cfs.statistics.nr_wakeups_sync);
if (is_migrate)
- schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+ schedstat_inc(p, cfs.statistics.nr_wakeups_migrate);
if (is_local)
- schedstat_inc(p, se.statistics.nr_wakeups_local);
+ schedstat_inc(p, cfs.statistics.nr_wakeups_local);
else
- schedstat_inc(p, se.statistics.nr_wakeups_remote);
+ schedstat_inc(p, cfs.statistics.nr_wakeups_remote);

activate_task(rq, p, en_flags);
}
@@ -2428,7 +2428,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
if (!(p->state & state))
goto out;

- if (p->se.on_rq)
+ if (p->cfs.on_rq)
goto out_running;

cpu = task_cpu(p);
@@ -2523,7 +2523,7 @@ static void try_to_wake_up_local(struct task_struct *p)
if (!(p->state & TASK_NORMAL))
return;

- if (!p->se.on_rq) {
+ if (!p->cfs.on_rq) {
if (likely(!task_running(rq, p))) {
schedstat_inc(rq, ttwu_count);
schedstat_inc(rq, ttwu_local);
@@ -2564,18 +2564,18 @@ int wake_up_state(struct task_struct *p, unsigned int state)
*/
static void __sched_fork(struct task_struct *p)
{
- p->se.exec_start = 0;
- p->se.sum_exec_runtime = 0;
- p->se.prev_sum_exec_runtime = 0;
- p->se.nr_migrations = 0;
+ p->cfs.exec_start = 0;
+ p->cfs.sum_exec_runtime = 0;
+ p->cfs.prev_sum_exec_runtime = 0;
+ p->cfs.nr_migrations = 0;

#ifdef CONFIG_SCHEDSTATS
- memset(&p->se.statistics, 0, sizeof(p->se.statistics));
+ memset(&p->cfs.statistics, 0, sizeof(p->cfs.statistics));
#endif

INIT_LIST_HEAD(&p->rt.run_list);
- p->se.on_rq = 0;
- INIT_LIST_HEAD(&p->se.group_node);
+ p->cfs.on_rq = 0;
+ INIT_LIST_HEAD(&p->cfs.group_node);

#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -3447,7 +3447,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)

if (task_current(rq, p)) {
update_rq_clock(rq);
- ns = rq->clock_task - p->se.exec_start;
+ ns = rq->clock_task - p->cfs.exec_start;
if ((s64)ns < 0)
ns = 0;
}
@@ -3480,7 +3480,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
u64 ns = 0;

rq = task_rq_lock(p, &flags);
- ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+ ns = p->cfs.sum_exec_runtime + do_task_delta_exec(p, rq);
task_rq_unlock(rq, &flags);

return ns;
@@ -3709,7 +3709,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
/*
* Use CFS's precise accounting:
*/
- rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+ rtime = nsecs_to_cputime(p->cfs.sum_exec_runtime);

if (total) {
u64 temp = rtime;
@@ -3897,7 +3897,7 @@ static inline void schedule_debug(struct task_struct *prev)

static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
- if (prev->se.on_rq)
+ if (prev->cfs.on_rq)
update_rq_clock(rq);
prev->sched_class->put_prev_task(rq, prev);
}
@@ -4556,7 +4556,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
trace_sched_pi_setprio(p, prio);
oldprio = p->prio;
prev_class = p->sched_class;
- on_rq = p->se.on_rq;
+ on_rq = p->cfs.on_rq;
running = task_current(rq, p);
if (on_rq)
dequeue_task(rq, p, 0);
@@ -4605,7 +4605,7 @@ void set_user_nice(struct task_struct *p, long nice)
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
- on_rq = p->se.on_rq;
+ on_rq = p->cfs.on_rq;
if (on_rq)
dequeue_task(rq, p, 0);

@@ -4739,7 +4739,7 @@ static struct task_struct *find_process_by_pid(pid_t pid)
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
- BUG_ON(p->se.on_rq);
+ BUG_ON(p->cfs.on_rq);

p->policy = policy;
p->rt_priority = prio;
@@ -4888,7 +4888,7 @@ recheck:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
- on_rq = p->se.on_rq;
+ on_rq = p->cfs.on_rq;
running = task_current(rq, p);
if (on_rq)
deactivate_task(rq, p, 0);
@@ -5539,7 +5539,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)

__sched_fork(idle);
idle->state = TASK_RUNNING;
- idle->se.exec_start = sched_clock();
+ idle->cfs.exec_start = sched_clock();

cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
/*
@@ -5747,7 +5747,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
* If we're not on a rq, the next wake-up will ensure we're
* placed properly.
*/
- if (p->se.on_rq) {
+ if (p->cfs.on_rq) {
deactivate_task(rq_src, p, 0);
set_task_cpu(p, dest_cpu);
activate_task(rq_dest, p, 0);
@@ -7839,27 +7839,27 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)

#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
- struct sched_entity *se, int cpu,
- struct sched_entity *parent)
+ struct sched_cfs_entity *cfs_se, int cpu,
+ struct sched_cfs_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
tg->cfs_rq[cpu] = cfs_rq;
init_cfs_rq(cfs_rq, rq);
cfs_rq->tg = tg;

- tg->se[cpu] = se;
- /* se could be NULL for init_task_group */
- if (!se)
+ tg->cfs_se[cpu] = cfs_se;
+ /* cfs_se could be NULL for init_task_group */
+ if (!cfs_se)
return;

if (!parent)
- se->cfs_rq = &rq->cfs;
+ cfs_se->cfs_rq = &rq->cfs;
else
- se->cfs_rq = parent->my_q;
+ cfs_se->cfs_rq = parent->my_q;

- se->my_q = cfs_rq;
- update_load_set(&se->load, 0);
- se->parent = parent;
+ cfs_se->my_q = cfs_rq;
+ update_load_set(&cfs_se->load, 0);
+ cfs_se->parent = parent;
}
#endif

@@ -7908,7 +7908,7 @@ void __init sched_init(void)
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);

#ifdef CONFIG_FAIR_GROUP_SCHED
- init_task_group.se = (struct sched_entity **)ptr;
+ init_task_group.cfs_se = (struct sched_cfs_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);

init_task_group.cfs_rq = (struct cfs_rq **)ptr;
@@ -7980,7 +7980,7 @@ void __init sched_init(void)
* A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
*
* We achieve this by letting init_task_group's tasks sit
- * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+ * directly in rq->cfs (i.e init_task_group->cfs_se[] = NULL).
*/
init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
#endif
@@ -8116,7 +8116,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
{
int on_rq;

- on_rq = p->se.on_rq;
+ on_rq = p->cfs.on_rq;
if (on_rq)
deactivate_task(rq, p, 0);
__setscheduler(rq, p, SCHED_NORMAL, 0);
@@ -8140,11 +8140,11 @@ void normalize_rt_tasks(void)
if (!p->mm)
continue;

- p->se.exec_start = 0;
+ p->cfs.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
- p->se.statistics.wait_start = 0;
- p->se.statistics.sleep_start = 0;
- p->se.statistics.block_start = 0;
+ p->cfs.statistics.wait_start = 0;
+ p->cfs.statistics.sleep_start = 0;
+ p->cfs.statistics.block_start = 0;
#endif

if (!rt_task(p)) {
@@ -8226,27 +8226,27 @@ static void free_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
- if (tg->se)
- kfree(tg->se[i]);
+ if (tg->cfs_se)
+ kfree(tg->cfs_se[i]);
}

kfree(tg->cfs_rq);
- kfree(tg->se);
+ kfree(tg->cfs_se);
}

static
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se;
+ struct sched_cfs_entity *cfs_se;
struct rq *rq;
int i;

tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
- tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->se)
+ tg->cfs_se = kzalloc(sizeof(cfs_se) * nr_cpu_ids, GFP_KERNEL);
+ if (!tg->cfs_se)
goto err;

tg->shares = NICE_0_LOAD;
@@ -8259,12 +8259,12 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
if (!cfs_rq)
goto err;

- se = kzalloc_node(sizeof(struct sched_entity),
- GFP_KERNEL, cpu_to_node(i));
- if (!se)
+ cfs_se = kzalloc_node(sizeof(struct sched_cfs_entity),
+ GFP_KERNEL, cpu_to_node(i));
+ if (!cfs_se)
goto err_free_rq;

- init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+ init_tg_cfs_entry(tg, cfs_rq, cfs_se, i, parent->cfs_se[i]);
}

return 1;
@@ -8447,7 +8447,7 @@ void sched_destroy_group(struct task_group *tg)

/* change task's runqueue when it moves between groups.
* The caller of this function should have put the task in its new group
- * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
+ * by now. This function just updates tsk->cfs.cfs_rq and tsk->cfs.parent to
* reflect its new group.
*/
void sched_move_task(struct task_struct *tsk)
@@ -8459,7 +8459,7 @@ void sched_move_task(struct task_struct *tsk)
rq = task_rq_lock(tsk, &flags);

running = task_current(rq, tsk);
- on_rq = tsk->se.on_rq;
+ on_rq = tsk->cfs.on_rq;

if (on_rq)
dequeue_task(rq, tsk, 0);
@@ -8493,7 +8493,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
/*
* We can't change the weight of the root cgroup.
*/
- if (!tg->se[0])
+ if (!tg->cfs_se[0])
return -EINVAL;

if (shares < MIN_SHARES)
@@ -8508,13 +8508,13 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
tg->shares = shares;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
- struct sched_entity *se;
+ struct sched_cfs_entity *cfs_se;

- se = tg->se[i];
+ cfs_se = tg->cfs_se[i];
/* Propagate contribution to hierarchy */
raw_spin_lock_irqsave(&rq->lock, flags);
- for_each_sched_entity(se)
- update_cfs_shares(group_cfs_rq(se), 0);
+ for_each_sched_entity(cfs_se)
+ update_cfs_shares(group_cfs_rq(cfs_se), 0);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}

diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 1dfae3d..16d0b10 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -56,8 +56,9 @@ static unsigned long nsec_low(unsigned long long nsec)
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
- struct sched_entity *se = tg->se[cpu];
- if (!se)
+ struct sched_cfs_entity *cfs_se = tg->cfs_se[cpu];
+
+ if (!cfs_se)
return;

#define P(F) \
@@ -65,22 +66,22 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
#define PN(F) \
SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

- PN(se->exec_start);
- PN(se->vruntime);
- PN(se->sum_exec_runtime);
+ PN(cfs_se->exec_start);
+ PN(cfs_se->vruntime);
+ PN(cfs_se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
- PN(se->statistics.wait_start);
- PN(se->statistics.sleep_start);
- PN(se->statistics.block_start);
- PN(se->statistics.sleep_max);
- PN(se->statistics.block_max);
- PN(se->statistics.exec_max);
- PN(se->statistics.slice_max);
- PN(se->statistics.wait_max);
- PN(se->statistics.wait_sum);
- P(se->statistics.wait_count);
+ PN(cfs_se->statistics.wait_start);
+ PN(cfs_se->statistics.sleep_start);
+ PN(cfs_se->statistics.block_start);
+ PN(cfs_se->statistics.sleep_max);
+ PN(cfs_se->statistics.block_max);
+ PN(cfs_se->statistics.exec_max);
+ PN(cfs_se->statistics.slice_max);
+ PN(cfs_se->statistics.wait_max);
+ PN(cfs_se->statistics.wait_sum);
+ P(cfs_se->statistics.wait_count);
#endif
- P(se->load.weight);
+ P(cfs_se->load.weight);
#undef PN
#undef P
}
@@ -96,14 +97,14 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)

SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
p->comm, p->pid,
- SPLIT_NS(p->se.vruntime),
+ SPLIT_NS(p->cfs.vruntime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
#ifdef CONFIG_SCHEDSTATS
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
- SPLIT_NS(p->se.vruntime),
- SPLIT_NS(p->se.sum_exec_runtime),
- SPLIT_NS(p->se.statistics.sum_sleep_runtime));
+ SPLIT_NS(p->cfs.vruntime),
+ SPLIT_NS(p->cfs.sum_exec_runtime),
+ SPLIT_NS(p->cfs.statistics.sum_sleep_runtime));
#else
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -127,7 +128,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
read_lock_irqsave(&tasklist_lock, flags);

do_each_thread(g, p) {
- if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+ if (!p->cfs.on_rq || task_cpu(p) != rq_cpu)
continue;

print_task(m, rq, p);
@@ -141,7 +142,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
spread, rq0_min_vruntime, spread0;
struct rq *rq = cpu_rq(cpu);
- struct sched_entity *last;
+ struct sched_cfs_entity *last;
unsigned long flags;

SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
@@ -382,55 +383,55 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
#define PN(F) \
SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

- PN(se.exec_start);
- PN(se.vruntime);
- PN(se.sum_exec_runtime);
+ PN(cfs.exec_start);
+ PN(cfs.vruntime);
+ PN(cfs.sum_exec_runtime);

nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
- PN(se.statistics.wait_start);
- PN(se.statistics.sleep_start);
- PN(se.statistics.block_start);
- PN(se.statistics.sleep_max);
- PN(se.statistics.block_max);
- PN(se.statistics.exec_max);
- PN(se.statistics.slice_max);
- PN(se.statistics.wait_max);
- PN(se.statistics.wait_sum);
- P(se.statistics.wait_count);
- PN(se.statistics.iowait_sum);
- P(se.statistics.iowait_count);
+ PN(cfs.statistics.wait_start);
+ PN(cfs.statistics.sleep_start);
+ PN(cfs.statistics.block_start);
+ PN(cfs.statistics.sleep_max);
+ PN(cfs.statistics.block_max);
+ PN(cfs.statistics.exec_max);
+ PN(cfs.statistics.slice_max);
+ PN(cfs.statistics.wait_max);
+ PN(cfs.statistics.wait_sum);
+ P(cfs.statistics.wait_count);
+ PN(cfs.statistics.iowait_sum);
+ P(cfs.statistics.iowait_count);
P(sched_info.bkl_count);
- P(se.nr_migrations);
- P(se.statistics.nr_migrations_cold);
- P(se.statistics.nr_failed_migrations_affine);
- P(se.statistics.nr_failed_migrations_running);
- P(se.statistics.nr_failed_migrations_hot);
- P(se.statistics.nr_forced_migrations);
- P(se.statistics.nr_wakeups);
- P(se.statistics.nr_wakeups_sync);
- P(se.statistics.nr_wakeups_migrate);
- P(se.statistics.nr_wakeups_local);
- P(se.statistics.nr_wakeups_remote);
- P(se.statistics.nr_wakeups_affine);
- P(se.statistics.nr_wakeups_affine_attempts);
- P(se.statistics.nr_wakeups_passive);
- P(se.statistics.nr_wakeups_idle);
+ P(cfs.nr_migrations);
+ P(cfs.statistics.nr_migrations_cold);
+ P(cfs.statistics.nr_failed_migrations_affine);
+ P(cfs.statistics.nr_failed_migrations_running);
+ P(cfs.statistics.nr_failed_migrations_hot);
+ P(cfs.statistics.nr_forced_migrations);
+ P(cfs.statistics.nr_wakeups);
+ P(cfs.statistics.nr_wakeups_sync);
+ P(cfs.statistics.nr_wakeups_migrate);
+ P(cfs.statistics.nr_wakeups_local);
+ P(cfs.statistics.nr_wakeups_remote);
+ P(cfs.statistics.nr_wakeups_affine);
+ P(cfs.statistics.nr_wakeups_affine_attempts);
+ P(cfs.statistics.nr_wakeups_passive);
+ P(cfs.statistics.nr_wakeups_idle);

{
u64 avg_atom, avg_per_cpu;

- avg_atom = p->se.sum_exec_runtime;
+ avg_atom = p->cfs.sum_exec_runtime;
if (nr_switches)
do_div(avg_atom, nr_switches);
else
avg_atom = -1LL;

- avg_per_cpu = p->se.sum_exec_runtime;
- if (p->se.nr_migrations) {
+ avg_per_cpu = p->cfs.sum_exec_runtime;
+ if (p->cfs.nr_migrations) {
avg_per_cpu = div64_u64(avg_per_cpu,
- p->se.nr_migrations);
+ p->cfs.nr_migrations);
} else {
avg_per_cpu = -1LL;
}
@@ -445,7 +446,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
SEQ_printf(m, "%-35s:%21Ld\n",
"nr_involuntary_switches", (long long)p->nivcsw);

- P(se.load.weight);
+ P(cfs.load.weight);
P(policy);
P(prio);
#undef PN
@@ -467,6 +468,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
- memset(&p->se.statistics, 0, sizeof(p->se.statistics));
+ memset(&p->cfs.statistics, 0, sizeof(p->cfs.statistics));
#endif
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae..1ae7a17 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -111,33 +111,33 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
}

/* An entity is a task if it doesn't "own" a runqueue */
-#define entity_is_task(se) (!se->my_q)
+#define cfs_entity_is_task(cfs_se) (!cfs_se->my_q)

-static inline struct task_struct *task_of(struct sched_entity *se)
+static inline struct task_struct *cfs_task_of(struct sched_cfs_entity *cfs_se)
{
#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(!entity_is_task(se));
+ WARN_ON_ONCE(!cfs_entity_is_task(cfs_se));
#endif
- return container_of(se, struct task_struct, se);
+ return container_of(cfs_se, struct task_struct, cfs);
}

/* Walk up scheduling entities hierarchy */
-#define for_each_sched_entity(se) \
- for (; se; se = se->parent)
+#define for_each_sched_cfs_entity(cfs_se) \
+ for (; cfs_se; cfs_se = cfs_se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
- return p->se.cfs_rq;
+ return p->cfs.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(struct sched_cfs_entity *cfs_se)
{
- return se->cfs_rq;
+ return cfs_se->cfs_rq;
}

/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+static inline struct cfs_rq *group_cfs_rq(struct sched_cfs_entity *grp)
{
return grp->my_q;
}
@@ -186,32 +186,35 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
+is_same_group(struct sched_cfs_entity *cfs_se,
+ struct sched_cfs_entity *cfs_pse)
{
- if (se->cfs_rq == pse->cfs_rq)
+ if (cfs_se->cfs_rq == cfs_pse->cfs_rq)
return 1;

return 0;
}

-static inline struct sched_entity *parent_entity(struct sched_entity *se)
+static inline
+struct sched_cfs_entity *parent_entity(struct sched_cfs_entity *cfs_se)
{
- return se->parent;
+ return cfs_se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
+static inline int depth_se(struct sched_cfs_entity *cfs_se)
{
int depth = 0;

- for_each_sched_entity(se)
+ for_each_sched_cfs_entity(cfs_se)
depth++;

return depth;
}

static void
-find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+find_matching_se(struct sched_cfs_entity **cfs_se,
+ struct sched_cfs_entity **cfs_pse)
{
int se_depth, pse_depth;

@@ -223,30 +226,30 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
*/

/* First walk up until both entities are at same depth */
- se_depth = depth_se(*se);
- pse_depth = depth_se(*pse);
+ se_depth = depth_se(*cfs_se);
+ pse_depth = depth_se(*cfs_pse);

while (se_depth > pse_depth) {
se_depth--;
- *se = parent_entity(*se);
+ *cfs_se = parent_entity(*cfs_se);
}

while (pse_depth > se_depth) {
pse_depth--;
- *pse = parent_entity(*pse);
+ *cfs_pse = parent_entity(*cfs_pse);
}

- while (!is_same_group(*se, *pse)) {
- *se = parent_entity(*se);
- *pse = parent_entity(*pse);
+ while (!is_same_group(*cfs_se, *cfs_pse)) {
+ *cfs_se = parent_entity(*cfs_se);
+ *cfs_pse = parent_entity(*cfs_pse);
}
}

#else /* !CONFIG_FAIR_GROUP_SCHED */

-static inline struct task_struct *task_of(struct sched_entity *se)
+static inline struct task_struct *cfs_task_of(struct sched_cfs_entity *cfs_se)
{
- return container_of(se, struct task_struct, se);
+ return container_of(cfs_se, struct task_struct, cfs);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -254,26 +257,26 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return container_of(cfs_rq, struct rq, cfs);
}

-#define entity_is_task(se) 1
+#define cfs_entity_is_task(cfs_se) 1

-#define for_each_sched_entity(se) \
- for (; se; se = NULL)
+#define for_each_sched_cfs_entity(cfs_se) \
+ for (; cfs_se; cfs_se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return &task_rq(p)->cfs;
}

-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(struct sched_cfs_entity *cfs_se)
{
- struct task_struct *p = task_of(se);
+ struct task_struct *p = cfs_task_of(cfs_se);
struct rq *rq = task_rq(p);

return &rq->cfs;
}

/* runqueue "owned" by this group */
-static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
+static inline struct cfs_rq *group_cfs_rq(struct sched_cfs_entity *grp)
{
return NULL;
}
@@ -294,19 +297,20 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
+static inline int is_same_group(struct sched_cfs_entity *cfs_se,
+ struct sched_cfs_entity *cfs_pse)
{
return 1;
}

-static inline struct sched_entity *parent_entity(struct sched_entity *se)
+static inline
+struct sched_cfs_entity *parent_entity(struct sched_cfs_entity *cfs_se)
{
return NULL;
}

-static inline void
-find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+static inline void find_matching_se(struct sched_cfs_entity **cfs_se,
+ struct sched_cfs_entity **cfs_pse)
{
}

@@ -335,15 +339,16 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}

-static inline int entity_before(struct sched_entity *a,
- struct sched_entity *b)
+static inline int entity_before(struct sched_cfs_entity *a,
+ struct sched_cfs_entity *b)
{
return (s64)(a->vruntime - b->vruntime) < 0;
}

-static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static inline s64
+entity_key(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
- return se->vruntime - cfs_rq->min_vruntime;
+ return cfs_se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
@@ -354,14 +359,14 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
vruntime = cfs_rq->curr->vruntime;

if (cfs_rq->rb_leftmost) {
- struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
- struct sched_entity,
- run_node);
+ struct sched_cfs_entity *cfs_se = rb_entry(cfs_rq->rb_leftmost,
+ struct sched_cfs_entity,
+ run_node);

if (!cfs_rq->curr)
- vruntime = se->vruntime;
+ vruntime = cfs_se->vruntime;
else
- vruntime = min_vruntime(vruntime, se->vruntime);
+ vruntime = min_vruntime(vruntime, cfs_se->vruntime);
}

cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
@@ -370,12 +375,13 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
/*
* Enqueue an entity into the rb-tree:
*/
-static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
struct rb_node *parent = NULL;
- struct sched_entity *entry;
- s64 key = entity_key(cfs_rq, se);
+ struct sched_cfs_entity *entry;
+ s64 key = entity_key(cfs_rq, cfs_se);
int leftmost = 1;

/*
@@ -383,7 +389,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
while (*link) {
parent = *link;
- entry = rb_entry(parent, struct sched_entity, run_node);
+ entry = rb_entry(parent, struct sched_cfs_entity, run_node);
/*
* We dont care about collisions. Nodes with
* the same key stay together.
@@ -401,42 +407,43 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* used):
*/
if (leftmost)
- cfs_rq->rb_leftmost = &se->run_node;
+ cfs_rq->rb_leftmost = &cfs_se->run_node;

- rb_link_node(&se->run_node, parent, link);
- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+ rb_link_node(&cfs_se->run_node, parent, link);
+ rb_insert_color(&cfs_se->run_node, &cfs_rq->tasks_timeline);
}

-static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
- if (cfs_rq->rb_leftmost == &se->run_node) {
+ if (cfs_rq->rb_leftmost == &cfs_se->run_node) {
struct rb_node *next_node;

- next_node = rb_next(&se->run_node);
+ next_node = rb_next(&cfs_se->run_node);
cfs_rq->rb_leftmost = next_node;
}

- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+ rb_erase(&cfs_se->run_node, &cfs_rq->tasks_timeline);
}

-static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_cfs_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = cfs_rq->rb_leftmost;

if (!left)
return NULL;

- return rb_entry(left, struct sched_entity, run_node);
+ return rb_entry(left, struct sched_cfs_entity, run_node);
}

-static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+static struct sched_cfs_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

if (!last)
return NULL;

- return rb_entry(last, struct sched_entity, run_node);
+ return rb_entry(last, struct sched_cfs_entity, run_node);
}

/**************************************************************
@@ -472,10 +479,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
* delta /= w
*/
static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
+calc_delta_fair(unsigned long delta, struct sched_cfs_entity *cfs_se)
{
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
+ if (unlikely(cfs_se->load.weight != NICE_0_LOAD))
+ delta = calc_delta_mine(delta, NICE_0_LOAD, &cfs_se->load);

return delta;
}
@@ -507,24 +514,24 @@ static u64 __sched_period(unsigned long nr_running)
*
* s = p*P[w/rw]
*/
-static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
- u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
+ u64 slice = __sched_period(cfs_rq->nr_running + !cfs_se->on_rq);

- for_each_sched_entity(se) {
+ for_each_sched_cfs_entity(cfs_se) {
struct load_weight *load;
struct load_weight lw;

- cfs_rq = cfs_rq_of(se);
+ cfs_rq = cfs_rq_of(cfs_se);
load = &cfs_rq->load;

- if (unlikely(!se->on_rq)) {
+ if (unlikely(!cfs_se->on_rq)) {
lw = cfs_rq->load;

- update_load_add(&lw, se->load.weight);
+ update_load_add(&lw, cfs_se->load.weight);
load = &lw;
}
- slice = calc_delta_mine(slice, se->load.weight, load);
+ slice = calc_delta_mine(slice, cfs_se->load.weight, load);
}
return slice;
}
@@ -534,9 +541,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
*
* vs = s/w
*/
-static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static u64
+sched_vslice(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
- return calc_delta_fair(sched_slice(cfs_rq, se), se);
+ return calc_delta_fair(sched_slice(cfs_rq, cfs_se), cfs_se);
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
@@ -547,7 +555,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
* are not in our scheduling class.
*/
static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+__update_curr(struct cfs_rq *cfs_rq, struct sched_cfs_entity *curr,
unsigned long delta_exec)
{
unsigned long delta_exec_weighted;
@@ -569,7 +577,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,

static void update_curr(struct cfs_rq *cfs_rq)
{
- struct sched_entity *curr = cfs_rq->curr;
+ struct sched_cfs_entity *curr = cfs_rq->curr;
u64 now = rq_of(cfs_rq)->clock_task;
unsigned long delta_exec;

@@ -588,8 +596,8 @@ static void update_curr(struct cfs_rq *cfs_rq)
__update_curr(cfs_rq, curr, delta_exec);
curr->exec_start = now;

- if (entity_is_task(curr)) {
- struct task_struct *curtask = task_of(curr);
+ if (cfs_entity_is_task(curr)) {
+ struct task_struct *curtask = cfs_task_of(curr);

trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
cpuacct_charge(curtask, delta_exec);
@@ -598,62 +606,66 @@ static void update_curr(struct cfs_rq *cfs_rq)
}

static inline void
-update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
- schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
+ schedstat_set(cfs_se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
* Task is being enqueued - update stats:
*/
-static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
/*
* Are we enqueueing a waiting task? (for current tasks
* a dequeue/enqueue event is a NOP)
*/
- if (se != cfs_rq->curr)
- update_stats_wait_start(cfs_rq, se);
+ if (cfs_se != cfs_rq->curr)
+ update_stats_wait_start(cfs_rq, cfs_se);
}

static void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
- rq_of(cfs_rq)->clock - se->statistics.wait_start));
- schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
- schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
- rq_of(cfs_rq)->clock - se->statistics.wait_start);
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
+{
+ schedstat_set(cfs_se->statistics.wait_max,
+ max(cfs_se->statistics.wait_max,
+ rq_of(cfs_rq)->clock-cfs_se->statistics.wait_start));
+ schedstat_set(cfs_se->statistics.wait_count,
+ cfs_se->statistics.wait_count + 1);
+ schedstat_set(cfs_se->statistics.wait_sum,
+ cfs_se->statistics.wait_sum + rq_of(cfs_rq)->clock -
+ cfs_se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
- if (entity_is_task(se)) {
- trace_sched_stat_wait(task_of(se),
- rq_of(cfs_rq)->clock - se->statistics.wait_start);
+ if (cfs_entity_is_task(cfs_se)) {
+ trace_sched_stat_wait(cfs_task_of(cfs_se),
+ rq_of(cfs_rq)->clock - cfs_se->statistics.wait_start);
}
#endif
- schedstat_set(se->statistics.wait_start, 0);
+ schedstat_set(cfs_se->statistics.wait_start, 0);
}

static inline void
-update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
/*
* Mark the end of the wait period if dequeueing a
* waiting task:
*/
- if (se != cfs_rq->curr)
- update_stats_wait_end(cfs_rq, se);
+ if (cfs_se != cfs_rq->curr)
+ update_stats_wait_end(cfs_rq, cfs_se);
}

/*
* We are picking a new current task - update its stats:
*/
static inline void
-update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
/*
* We are starting a new run period:
*/
- se->exec_start = rq_of(cfs_rq)->clock_task;
+ cfs_se->exec_start = rq_of(cfs_rq)->clock_task;
}

/**************************************************
@@ -674,27 +686,27 @@ add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
#endif

static void
-account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- update_load_add(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- inc_cpu_load(rq_of(cfs_rq), se->load.weight);
- if (entity_is_task(se)) {
- add_cfs_task_weight(cfs_rq, se->load.weight);
- list_add(&se->group_node, &cfs_rq->tasks);
+account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
+{
+ update_load_add(&cfs_rq->load, cfs_se->load.weight);
+ if (!parent_entity(cfs_se))
+ inc_cpu_load(rq_of(cfs_rq), cfs_se->load.weight);
+ if (cfs_entity_is_task(cfs_se)) {
+ add_cfs_task_weight(cfs_rq, cfs_se->load.weight);
+ list_add(&cfs_se->group_node, &cfs_rq->tasks);
}
cfs_rq->nr_running++;
}

static void
-account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- update_load_sub(&cfs_rq->load, se->load.weight);
- if (!parent_entity(se))
- dec_cpu_load(rq_of(cfs_rq), se->load.weight);
- if (entity_is_task(se)) {
- add_cfs_task_weight(cfs_rq, -se->load.weight);
- list_del_init(&se->group_node);
+account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
+{
+ update_load_sub(&cfs_rq->load, cfs_se->load.weight);
+ if (!parent_entity(cfs_se))
+ dec_cpu_load(rq_of(cfs_rq), cfs_se->load.weight);
+ if (cfs_entity_is_task(cfs_se)) {
+ add_cfs_task_weight(cfs_rq, -cfs_se->load.weight);
+ list_del_init(&cfs_se->group_node);
}
cfs_rq->nr_running--;
}
@@ -762,34 +774,35 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
list_del_leaf_cfs_rq(cfs_rq);
}

-static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+static void reweight_entity(struct cfs_rq *cfs_rq,
+ struct sched_cfs_entity *cfs_se,
unsigned long weight)
{
- if (se->on_rq) {
+ if (cfs_se->on_rq) {
/* commit outstanding execution time */
- if (cfs_rq->curr == se)
+ if (cfs_rq->curr == cfs_se)
update_curr(cfs_rq);
- account_entity_dequeue(cfs_rq, se);
+ account_entity_dequeue(cfs_rq, cfs_se);
}

- update_load_set(&se->load, weight);
+ update_load_set(&cfs_se->load, weight);

- if (se->on_rq)
- account_entity_enqueue(cfs_rq, se);
+ if (cfs_se->on_rq)
+ account_entity_enqueue(cfs_rq, cfs_se);
}

static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
{
struct task_group *tg;
- struct sched_entity *se;
+ struct sched_cfs_entity *cfs_se;
long load_weight, load, shares;

if (!cfs_rq)
return;

tg = cfs_rq->tg;
- se = tg->se[cpu_of(rq_of(cfs_rq))];
- if (!se)
+ cfs_se = tg->cfs_se[cpu_of(rq_of(cfs_rq))];
+ if (!cfs_se)
return;

load = cfs_rq->load.weight + weight_delta;
@@ -807,7 +820,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
if (shares > tg->shares)
shares = tg->shares;

- reweight_entity(cfs_rq_of(se), se, shares);
+ reweight_entity(cfs_rq_of(cfs_se), cfs_se, shares);
}

static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
@@ -831,47 +844,48 @@ static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

-static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
#ifdef CONFIG_SCHEDSTATS
struct task_struct *tsk = NULL;

- if (entity_is_task(se))
- tsk = task_of(se);
+ if (cfs_entity_is_task(cfs_se))
+ tsk = cfs_task_of(cfs_se);

- if (se->statistics.sleep_start) {
- u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
+ if (cfs_se->statistics.sleep_start) {
+ u64 delta = rq_of(cfs_rq)->clock - cfs_se->statistics.sleep_start;

if ((s64)delta < 0)
delta = 0;

- if (unlikely(delta > se->statistics.sleep_max))
- se->statistics.sleep_max = delta;
+ if (unlikely(delta > cfs_se->statistics.sleep_max))
+ cfs_se->statistics.sleep_max = delta;

- se->statistics.sleep_start = 0;
- se->statistics.sum_sleep_runtime += delta;
+ cfs_se->statistics.sleep_start = 0;
+ cfs_se->statistics.sum_sleep_runtime += delta;

if (tsk) {
account_scheduler_latency(tsk, delta >> 10, 1);
trace_sched_stat_sleep(tsk, delta);
}
}
- if (se->statistics.block_start) {
- u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
+ if (cfs_se->statistics.block_start) {
+ u64 delta = rq_of(cfs_rq)->clock - cfs_se->statistics.block_start;

if ((s64)delta < 0)
delta = 0;

- if (unlikely(delta > se->statistics.block_max))
- se->statistics.block_max = delta;
+ if (unlikely(delta > cfs_se->statistics.block_max))
+ cfs_se->statistics.block_max = delta;

- se->statistics.block_start = 0;
- se->statistics.sum_sleep_runtime += delta;
+ cfs_se->statistics.block_start = 0;
+ cfs_se->statistics.sum_sleep_runtime += delta;

if (tsk) {
if (tsk->in_iowait) {
- se->statistics.iowait_sum += delta;
- se->statistics.iowait_count++;
+ cfs_se->statistics.iowait_sum += delta;
+ cfs_se->statistics.iowait_count++;
trace_sched_stat_iowait(tsk, delta);
}

@@ -891,10 +905,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}

-static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+check_spread(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
#ifdef CONFIG_SCHED_DEBUG
- s64 d = se->vruntime - cfs_rq->min_vruntime;
+ s64 d = cfs_se->vruntime - cfs_rq->min_vruntime;

if (d < 0)
d = -d;
@@ -905,7 +920,8 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
}

static void
-place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+place_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se,
+ int initial)
{
u64 vruntime = cfs_rq->min_vruntime;

@@ -916,7 +932,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
* stays open at the end.
*/
if (initial && sched_feat(START_DEBIT))
- vruntime += sched_vslice(cfs_rq, se);
+ vruntime += sched_vslice(cfs_rq, cfs_se);

/* sleeps up to a single latency don't count. */
if (!initial) {
@@ -933,88 +949,92 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
}

/* ensure we never gain time by being placed backwards. */
- vruntime = max_vruntime(se->vruntime, vruntime);
+ vruntime = max_vruntime(cfs_se->vruntime, vruntime);

- se->vruntime = vruntime;
+ cfs_se->vruntime = vruntime;
}

static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se,
+ int flags)
{
/*
* Update the normalized vruntime before updating min_vruntime
* through callig update_curr().
*/
if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
- se->vruntime += cfs_rq->min_vruntime;
+ cfs_se->vruntime += cfs_rq->min_vruntime;

/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq, se->load.weight);
- account_entity_enqueue(cfs_rq, se);
+ update_cfs_shares(cfs_rq, cfs_se->load.weight);
+ account_entity_enqueue(cfs_rq, cfs_se);

if (flags & ENQUEUE_WAKEUP) {
- place_entity(cfs_rq, se, 0);
- enqueue_sleeper(cfs_rq, se);
+ place_entity(cfs_rq, cfs_se, 0);
+ enqueue_sleeper(cfs_rq, cfs_se);
}

- update_stats_enqueue(cfs_rq, se);
- check_spread(cfs_rq, se);
- if (se != cfs_rq->curr)
- __enqueue_entity(cfs_rq, se);
- se->on_rq = 1;
+ update_stats_enqueue(cfs_rq, cfs_se);
+ check_spread(cfs_rq, cfs_se);
+ if (cfs_se != cfs_rq->curr)
+ __enqueue_entity(cfs_rq, cfs_se);
+ cfs_se->on_rq = 1;

if (cfs_rq->nr_running == 1)
list_add_leaf_cfs_rq(cfs_rq);
}

-static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+__clear_buddies(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
- if (!se || cfs_rq->last == se)
+ if (!cfs_se || cfs_rq->last == cfs_se)
cfs_rq->last = NULL;

- if (!se || cfs_rq->next == se)
+ if (!cfs_se || cfs_rq->next == cfs_se)
cfs_rq->next = NULL;
}

-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+clear_buddies(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
- for_each_sched_entity(se)
- __clear_buddies(cfs_rq_of(se), se);
+ for_each_sched_cfs_entity(cfs_se)
+ __clear_buddies(cfs_rq_of(cfs_se), cfs_se);
}

static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se,
+ int flags)
{
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);

- update_stats_dequeue(cfs_rq, se);
+ update_stats_dequeue(cfs_rq, cfs_se);
if (flags & DEQUEUE_SLEEP) {
#ifdef CONFIG_SCHEDSTATS
- if (entity_is_task(se)) {
- struct task_struct *tsk = task_of(se);
+ if (cfs_entity_is_task(cfs_se)) {
+ struct task_struct *tsk = cfs_task_of(cfs_se);

if (tsk->state & TASK_INTERRUPTIBLE)
- se->statistics.sleep_start = rq_of(cfs_rq)->clock;
+ cfs_se->statistics.sleep_start = rq_of(cfs_rq)->clock;
if (tsk->state & TASK_UNINTERRUPTIBLE)
- se->statistics.block_start = rq_of(cfs_rq)->clock;
+ cfs_se->statistics.block_start = rq_of(cfs_rq)->clock;
}
#endif
}

- clear_buddies(cfs_rq, se);
+ clear_buddies(cfs_rq, cfs_se);

- if (se != cfs_rq->curr)
- __dequeue_entity(cfs_rq, se);
- se->on_rq = 0;
+ if (cfs_se != cfs_rq->curr)
+ __dequeue_entity(cfs_rq, cfs_se);
+ cfs_se->on_rq = 0;
update_cfs_load(cfs_rq, 0);
- account_entity_dequeue(cfs_rq, se);
+ account_entity_dequeue(cfs_rq, cfs_se);
update_min_vruntime(cfs_rq);
update_cfs_shares(cfs_rq, 0);

@@ -1024,14 +1044,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* movement in our normalized position.
*/
if (!(flags & DEQUEUE_SLEEP))
- se->vruntime -= cfs_rq->min_vruntime;
+ cfs_se->vruntime -= cfs_rq->min_vruntime;
}

/*
* Preempt the current task with a newly woken task if needed:
*/
static void
-check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_cfs_entity *curr)
{
unsigned long ideal_runtime, delta_exec;

@@ -1059,8 +1079,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;

if (cfs_rq->nr_running > 1) {
- struct sched_entity *se = __pick_next_entity(cfs_rq);
- s64 delta = curr->vruntime - se->vruntime;
+ struct sched_cfs_entity *cfs_se = __pick_next_entity(cfs_rq);
+ s64 delta = curr->vruntime - cfs_se->vruntime;

if (delta > ideal_runtime)
resched_task(rq_of(cfs_rq)->curr);
@@ -1068,58 +1088,59 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
}

static void
-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+set_next_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
{
/* 'current' is not kept within the tree. */
- if (se->on_rq) {
+ if (cfs_se->on_rq) {
/*
* Any task has to be enqueued before it get to execute on
* a CPU. So account for the time it spent waiting on the
* runqueue.
*/
- update_stats_wait_end(cfs_rq, se);
- __dequeue_entity(cfs_rq, se);
+ update_stats_wait_end(cfs_rq, cfs_se);
+ __dequeue_entity(cfs_rq, cfs_se);
}

- update_stats_curr_start(cfs_rq, se);
- cfs_rq->curr = se;
+ update_stats_curr_start(cfs_rq, cfs_se);
+ cfs_rq->curr = cfs_se;
#ifdef CONFIG_SCHEDSTATS
/*
* Track our maximum slice length, if the CPU's load is at
* least twice that of our own weight (i.e. dont track it
* when there are only lesser-weight tasks around):
*/
- if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
- se->statistics.slice_max = max(se->statistics.slice_max,
- se->sum_exec_runtime - se->prev_sum_exec_runtime);
+ if (rq_of(cfs_rq)->load.weight >= 2*cfs_se->load.weight) {
+ cfs_se->statistics.slice_max = max(cfs_se->statistics.slice_max,
+ cfs_se->sum_exec_runtime - cfs_se->prev_sum_exec_runtime);
}
#endif
- se->prev_sum_exec_runtime = se->sum_exec_runtime;
+ cfs_se->prev_sum_exec_runtime = cfs_se->sum_exec_runtime;
}

-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+static int wakeup_preempt_entity(struct sched_cfs_entity *curr,
+ struct sched_cfs_entity *cfs_se);

-static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_cfs_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
- struct sched_entity *se = __pick_next_entity(cfs_rq);
- struct sched_entity *left = se;
+ struct sched_cfs_entity *cfs_se = __pick_next_entity(cfs_rq);
+ struct sched_cfs_entity *left = cfs_se;

if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
- se = cfs_rq->next;
+ cfs_se = cfs_rq->next;

/*
* Prefer last buddy, try to return the CPU to a preempted task.
*/
if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
- se = cfs_rq->last;
+ cfs_se = cfs_rq->last;

- clear_buddies(cfs_rq, se);
+ clear_buddies(cfs_rq, cfs_se);

- return se;
+ return cfs_se;
}

-static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
+static void
+put_prev_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *prev)
{
/*
* If still on the runqueue then deactivate_task()
@@ -1138,7 +1159,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
}

static void
-entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+entity_tick(struct cfs_rq *cfs_rq, struct sched_cfs_entity *curr, int queued)
{
/*
* Update run-time statistics of the 'current'.
@@ -1178,14 +1199,15 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct sched_cfs_entity *cfs_se = &p->cfs;
+ struct cfs_rq *cfs_rq = cfs_rq_of(cfs_se);

WARN_ON(task_rq(p) != rq);

if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
- u64 slice = sched_slice(cfs_rq, se);
- u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+ u64 slice = sched_slice(cfs_rq, cfs_se);
+ u64 ran = cfs_se->sum_exec_runtime -
+ cfs_se->prev_sum_exec_runtime;
s64 delta = slice - ran;

if (delta < 0) {
@@ -1217,7 +1239,7 @@ static void hrtick_update(struct rq *rq)
if (curr->sched_class != &fair_sched_class)
return;

- if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
+ if (cfs_rq_of(&curr->cfs)->nr_running < sched_nr_latency)
hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
@@ -1240,18 +1262,18 @@ static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
+ struct sched_cfs_entity *cfs_se = &p->cfs;

- for_each_sched_entity(se) {
- if (se->on_rq)
+ for_each_sched_cfs_entity(cfs_se) {
+ if (cfs_se->on_rq)
break;
- cfs_rq = cfs_rq_of(se);
- enqueue_entity(cfs_rq, se, flags);
+ cfs_rq = cfs_rq_of(cfs_se);
+ enqueue_entity(cfs_rq, cfs_se, flags);
flags = ENQUEUE_WAKEUP;
}

- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ for_each_sched_cfs_entity(cfs_se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(cfs_se);

update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq, 0);
@@ -1268,11 +1290,11 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
+ struct sched_cfs_entity *cfs_se = &p->cfs;

- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- dequeue_entity(cfs_rq, se, flags);
+ for_each_sched_cfs_entity(cfs_se) {
+ cfs_rq = cfs_rq_of(cfs_se);
+ dequeue_entity(cfs_rq, cfs_se, flags);

/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight)
@@ -1280,8 +1302,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
flags |= DEQUEUE_SLEEP;
}

- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ for_each_sched_cfs_entity(cfs_se) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(cfs_se);

update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq, 0);
@@ -1299,7 +1321,7 @@ static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- struct sched_entity *rightmost, *se = &curr->se;
+ struct sched_cfs_entity *rightmost, *cfs_se = &curr->cfs;

/*
* Are we the only task in the tree?
@@ -1307,7 +1329,7 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(cfs_rq->nr_running == 1))
return;

- clear_buddies(cfs_rq, se);
+ clear_buddies(cfs_rq, cfs_se);

if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
@@ -1325,7 +1347,7 @@ static void yield_task_fair(struct rq *rq)
/*
* Already in the rightmost position?
*/
- if (unlikely(!rightmost || entity_before(rightmost, se)))
+ if (unlikely(!rightmost || entity_before(rightmost, cfs_se)))
return;

/*
@@ -1333,17 +1355,17 @@ static void yield_task_fair(struct rq *rq)
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
*/
- se->vruntime = rightmost->vruntime + 1;
+ cfs_se->vruntime = rightmost->vruntime + 1;
}

#ifdef CONFIG_SMP

static void task_waking_fair(struct rq *rq, struct task_struct *p)
{
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct sched_cfs_entity *cfs_se = &p->cfs;
+ struct cfs_rq *cfs_rq = cfs_rq_of(cfs_se);

- se->vruntime -= cfs_rq->min_vruntime;
+ cfs_se->vruntime -= cfs_rq->min_vruntime;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1356,17 +1378,17 @@ static void task_waking_fair(struct rq *rq, struct task_struct *p)
*/
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
- struct sched_entity *se = tg->se[cpu];
+ struct sched_cfs_entity *cfs_se = tg->cfs_se[cpu];

if (!tg->parent)
return wl;

- for_each_sched_entity(se) {
+ for_each_sched_cfs_entity(cfs_se) {
long S, rw, s, a, b;

- S = se->my_q->tg->shares;
- s = se->load.weight;
- rw = se->my_q->load.weight;
+ S = cfs_se->my_q->tg->shares;
+ s = cfs_se->load.weight;
+ rw = cfs_se->my_q->load.weight;

a = S*(rw + wl);
b = S*rw + s*wg;
@@ -1422,14 +1444,14 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
rcu_read_lock();
if (sync) {
tg = task_group(current);
- weight = current->se.load.weight;
+ weight = current->cfs.load.weight;

this_load += effective_load(tg, this_cpu, -weight, -weight);
load += effective_load(tg, prev_cpu, 0, -weight);
}

tg = task_group(p);
- weight = p->se.load.weight;
+ weight = p->cfs.load.weight;

/*
* In low-load situations, where prev_cpu is idle and this_cpu is idle
@@ -1465,7 +1487,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
if (sync && balanced)
return 1;

- schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
+ schedstat_inc(p, cfs.statistics.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);

if (balanced ||
@@ -1477,7 +1499,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* there is no bad imbalance.
*/
schedstat_inc(sd, ttwu_move_affine);
- schedstat_inc(p, se.statistics.nr_wakeups_affine);
+ schedstat_inc(p, cfs.statistics.nr_wakeups_affine);

return 1;
}
@@ -1738,7 +1760,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
#endif /* CONFIG_SMP */

static unsigned long
-wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
+wakeup_gran(struct sched_cfs_entity *curr, struct sched_cfs_entity *cfs_se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;

@@ -1755,8 +1777,8 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
* This is especially important for buddies when the leftmost
* task is higher priority than the buddy.
*/
- if (unlikely(se->load.weight != NICE_0_LOAD))
- gran = calc_delta_fair(gran, se);
+ if (unlikely(cfs_se->load.weight != NICE_0_LOAD))
+ gran = calc_delta_fair(gran, cfs_se);

return gran;
}
@@ -1775,34 +1797,34 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
* w(c, s3) = 1
*
*/
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+static int wakeup_preempt_entity(struct sched_cfs_entity *curr,
+ struct sched_cfs_entity *cfs_se)
{
- s64 gran, vdiff = curr->vruntime - se->vruntime;
+ s64 gran, vdiff = curr->vruntime - cfs_se->vruntime;

if (vdiff <= 0)
return -1;

- gran = wakeup_gran(curr, se);
+ gran = wakeup_gran(curr, cfs_se);
if (vdiff > gran)
return 1;

return 0;
}

-static void set_last_buddy(struct sched_entity *se)
+static void set_last_buddy(struct sched_cfs_entity *cfs_se)
{
- if (likely(task_of(se)->policy != SCHED_IDLE)) {
- for_each_sched_entity(se)
- cfs_rq_of(se)->last = se;
+ if (likely(cfs_task_of(cfs_se)->policy != SCHED_IDLE)) {
+ for_each_sched_cfs_entity(cfs_se)
+ cfs_rq_of(cfs_se)->last = cfs_se;
}
}

-static void set_next_buddy(struct sched_entity *se)
+static void set_next_buddy(struct sched_cfs_entity *cfs_se)
{
- if (likely(task_of(se)->policy != SCHED_IDLE)) {
- for_each_sched_entity(se)
- cfs_rq_of(se)->next = se;
+ if (likely(cfs_task_of(cfs_se)->policy != SCHED_IDLE)) {
+ for_each_sched_cfs_entity(cfs_se)
+ cfs_rq_of(cfs_se)->next = cfs_se;
}
}

@@ -1812,15 +1834,15 @@ static void set_next_buddy(struct sched_entity *se)
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
struct task_struct *curr = rq->curr;
- struct sched_entity *se = &curr->se, *pse = &p->se;
+ struct sched_cfs_entity *cfs_se = &curr->cfs, *cfs_pse = &p->cfs;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
int scale = cfs_rq->nr_running >= sched_nr_latency;

- if (unlikely(se == pse))
+ if (unlikely(cfs_se == cfs_pse))
return;

if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
- set_next_buddy(pse);
+ set_next_buddy(cfs_pse);

/*
* We can come here with TIF_NEED_RESCHED already set from new task
@@ -1844,9 +1866,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;

update_curr(cfs_rq);
- find_matching_se(&se, &pse);
- BUG_ON(!pse);
- if (wakeup_preempt_entity(se, pse) == 1)
+ find_matching_se(&cfs_se, &cfs_pse);
+ BUG_ON(!cfs_pse);
+ if (wakeup_preempt_entity(cfs_se, cfs_pse) == 1)
goto preempt;

return;
@@ -1862,29 +1884,29 @@ preempt:
* Also, during early boot the idle thread is in the fair class,
* for obvious reasons its a bad idea to schedule back to it.
*/
- if (unlikely(!se->on_rq || curr == rq->idle))
+ if (unlikely(!cfs_se->on_rq || curr == rq->idle))
return;

- if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
- set_last_buddy(se);
+ if (sched_feat(LAST_BUDDY) && scale && cfs_entity_is_task(cfs_se))
+ set_last_buddy(cfs_se);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
struct task_struct *p;
struct cfs_rq *cfs_rq = &rq->cfs;
- struct sched_entity *se;
+ struct sched_cfs_entity *cfs_se;

if (!cfs_rq->nr_running)
return NULL;

do {
- se = pick_next_entity(cfs_rq);
- set_next_entity(cfs_rq, se);
- cfs_rq = group_cfs_rq(se);
+ cfs_se = pick_next_entity(cfs_rq);
+ set_next_entity(cfs_rq, cfs_se);
+ cfs_rq = group_cfs_rq(cfs_se);
} while (cfs_rq);

- p = task_of(se);
+ p = cfs_task_of(cfs_se);
hrtick_start_fair(rq, p);

return p;
@@ -1895,12 +1917,12 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
*/
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
- struct sched_entity *se = &prev->se;
+ struct sched_cfs_entity *cfs_se = &prev->cfs;
struct cfs_rq *cfs_rq;

- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- put_prev_entity(cfs_rq, se);
+ for_each_sched_cfs_entity(cfs_se) {
+ cfs_rq = cfs_rq_of(cfs_se);
+ put_prev_entity(cfs_rq, cfs_se);
}
}

@@ -1938,13 +1960,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
* 3) are cache-hot on their current CPU.
*/
if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
- schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
+ schedstat_inc(p, cfs.statistics.nr_failed_migrations_affine);
return 0;
}
*all_pinned = 0;

if (task_running(rq, p)) {
- schedstat_inc(p, se.statistics.nr_failed_migrations_running);
+ schedstat_inc(p, cfs.statistics.nr_failed_migrations_running);
return 0;
}

@@ -1960,14 +1982,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
#ifdef CONFIG_SCHEDSTATS
if (tsk_cache_hot) {
schedstat_inc(sd, lb_hot_gained[idle]);
- schedstat_inc(p, se.statistics.nr_forced_migrations);
+ schedstat_inc(p, cfs.statistics.nr_forced_migrations);
}
#endif
return 1;
}

if (tsk_cache_hot) {
- schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
+ schedstat_inc(p, cfs.statistics.nr_failed_migrations_hot);
return 0;
}
return 1;
@@ -1989,7 +2011,7 @@ move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
int pinned = 0;

for_each_leaf_cfs_rq(busiest, cfs_rq) {
- list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
+ list_for_each_entry_safe(p, n, &cfs_rq->tasks, cfs.group_node) {

if (!can_migrate_task(p, busiest, this_cpu,
sd, idle, &pinned))
@@ -2024,17 +2046,17 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,

pinned = 1;

- list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
+ list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, cfs.group_node) {
if (loops++ > sysctl_sched_nr_migrate)
break;

- if ((p->se.load.weight >> 1) > rem_load_move ||
+ if ((p->cfs.load.weight >> 1) > rem_load_move ||
!can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
continue;

pull_task(busiest, p, this_rq, this_cpu);
pulled++;
- rem_load_move -= p->se.load.weight;
+ rem_load_move -= p->cfs.load.weight;

#ifdef CONFIG_PREEMPT
/*
@@ -2080,7 +2102,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
unsigned long flags;
struct rq *rq;

- if (!tg->se[cpu])
+ if (!tg->cfs_se[cpu])
return 0;

rq = cpu_rq(cpu);
@@ -3996,11 +4018,11 @@ static inline void idle_balance(int cpu, struct rq *rq)
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &curr->se;
+ struct sched_cfs_entity *cfs_se = &curr->cfs;

- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- entity_tick(cfs_rq, se, queued);
+ for_each_sched_cfs_entity(cfs_se) {
+ cfs_rq = cfs_rq_of(cfs_se);
+ entity_tick(cfs_rq, cfs_se, queued);
}
}

@@ -4012,7 +4034,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
static void task_fork_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(current);
- struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
+ struct sched_cfs_entity *cfs_se = &p->cfs, *curr = cfs_rq->curr;
int this_cpu = smp_processor_id();
struct rq *rq = this_rq();
unsigned long flags;
@@ -4030,19 +4052,20 @@ static void task_fork_fair(struct task_struct *p)
update_curr(cfs_rq);

if (curr)
- se->vruntime = curr->vruntime;
- place_entity(cfs_rq, se, 1);
+ cfs_se->vruntime = curr->vruntime;
+ place_entity(cfs_rq, cfs_se, 1);

- if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
+ if (sysctl_sched_child_runs_first &&
+ curr && entity_before(curr, cfs_se)) {
/*
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
*/
- swap(curr->vruntime, se->vruntime);
+ swap(curr->vruntime, cfs_se->vruntime);
resched_task(rq->curr);
}

- se->vruntime -= cfs_rq->min_vruntime;
+ cfs_se->vruntime -= cfs_rq->min_vruntime;

raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -4090,10 +4113,10 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p,
*/
static void set_curr_task_fair(struct rq *rq)
{
- struct sched_entity *se = &rq->curr->se;
+ struct sched_cfs_entity *cfs_se = &rq->curr->cfs;

- for_each_sched_entity(se)
- set_next_entity(cfs_rq_of(se), se);
+ for_each_sched_cfs_entity(cfs_se)
+ set_next_entity(cfs_rq_of(cfs_se), cfs_se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -4113,16 +4136,16 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
* fair sleeper stuff for the first placement, but who cares.
*/
if (!on_rq)
- p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+ p->cfs.vruntime -= cfs_rq_of(&p->cfs)->min_vruntime;
set_task_rq(p, task_cpu(p));
if (!on_rq)
- p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
+ p->cfs.vruntime += cfs_rq_of(&p->cfs)->min_vruntime;
}
#endif

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
- struct sched_entity *se = &task->se;
+ struct sched_cfs_entity *cfs_se = &task->cfs;
unsigned int rr_interval = 0;

/*
@@ -4130,7 +4153,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
* idle runqueue:
*/
if (rq->cfs.load.weight)
- rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+ rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, cfs_se));

return rr_interval;
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec7..2bba302 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -628,16 +628,16 @@ static void update_curr_rt(struct rq *rq)
if (!task_has_rt_policy(curr))
return;

- delta_exec = rq->clock_task - curr->se.exec_start;
+ delta_exec = rq->clock_task - curr->cfs.exec_start;
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;

- schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+ schedstat_set(curr->cfs.statistics.exec_max, max(curr->cfs.statistics.exec_max, delta_exec));

- curr->se.sum_exec_runtime += delta_exec;
+ curr->cfs.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);

- curr->se.exec_start = rq->clock_task;
+ curr->cfs.exec_start = rq->clock_task;
cpuacct_charge(curr, delta_exec);

sched_rt_avg_update(rq, delta_exec);
@@ -1099,7 +1099,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
} while (rt_rq);

p = rt_task_of(rt_se);
- p->se.exec_start = rq->clock_task;
+ p->cfs.exec_start = rq->clock_task;

return p;
}
@@ -1126,13 +1126,13 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
- p->se.exec_start = 0;
+ p->cfs.exec_start = 0;

/*
* The previous task needs to be made eligible for pushing
* if it is still active
*/
- if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+ if (p->cfs.on_rq && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}

@@ -1283,7 +1283,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
!cpumask_test_cpu(lowest_rq->cpu,
&task->cpus_allowed) ||
task_running(rq, task) ||
- !task->se.on_rq)) {
+ !task->cfs.on_rq)) {

raw_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
@@ -1317,7 +1317,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
BUG_ON(task_current(rq, p));
BUG_ON(p->rt.nr_cpus_allowed <= 1);

- BUG_ON(!p->se.on_rq);
+ BUG_ON(!p->cfs.on_rq);
BUG_ON(!rt_task(p));

return p;
@@ -1463,7 +1463,7 @@ static int pull_rt_task(struct rq *this_rq)
*/
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
- WARN_ON(!p->se.on_rq);
+ WARN_ON(!p->cfs.on_rq);

/*
* There's a chance that p is higher in priority
@@ -1534,7 +1534,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
* Update the migration status of the RQ if we have an RT task
* which is running AND changing its weight value.
*/
- if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+ if (p->cfs.on_rq && (weight != p->rt.nr_cpus_allowed)) {
struct rq *rq = task_rq(p);

if (!task_current(rq, p)) {
@@ -1701,7 +1701,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
p->rt.timeout++;
next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
if (p->rt.timeout > next)
- p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
+ p->cputime_expires.sched_exp = p->cfs.sum_exec_runtime;
}
}

@@ -1737,7 +1737,7 @@ static void set_curr_task_rt(struct rq *rq)
{
struct task_struct *p = rq->curr;

- p->se.exec_start = rq->clock_task;
+ p->cfs.exec_start = rq->clock_task;

/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 2bf6b47..763732d 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -26,7 +26,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
{
struct task_struct *stop = rq->stop;

- if (stop && stop->se.on_rq)
+ if (stop && stop->cfs.on_rq)
return stop;

return NULL;
--
1.7.2.3

--
<<This happens because I choose it to happen!>> (Raistlin Majere)
----------------------------------------------------------------------
Dario Faggioli, ReTiS Lab, Scuola Superiore Sant'Anna, Pisa (Italy)

http://retis.sssup.it/people/faggioli -- dario.faggioli@xxxxxxxxxx