[PATCH 2/3] sched: sched_rt_entity

From: Peter Zijlstra
Date: Tue Dec 18 2007 - 13:21:11 EST


Move the task_struct members specific to rt scheduling together into a
new struct sched_rt_entity, analogous to sched_entity. A future
optimization could be to put sched_entity and sched_rt_entity into a
union.
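
Not part of this patch, but a rough sketch of what that union could look
like (assuming a task is only ever queued through one of the two entities
at a time, and that an anonymous union is acceptable here):

    /*
     * Illustrative sketch only -- overlay the per-class scheduling
     * entities so they share storage in task_struct.
     */
    struct task_struct {
            /* ... */
            const struct sched_class *sched_class;
            union {
                    struct sched_entity     se;     /* CFS */
                    struct sched_rt_entity  rt;     /* RT  */
            };
            /* ... */
    };

All users already go through p->se.* or p->rt.*, so only the definition
would need to change.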

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
CC: Srivatsa Vaddagiri <vatsa@xxxxxxxxxxxxxxxxxx>
---
 include/linux/init_task.h |    5 +++--
 include/linux/sched.h     |    8 ++++++--
 kernel/sched.c            |    2 +-
 kernel/sched_rt.c         |   20 ++++++++++----------
 mm/oom_kill.c             |    2 +-
 5 files changed, 21 insertions(+), 16 deletions(-)

Index: linux-2.6/include/linux/init_task.h
===================================================================
--- linux-2.6.orig/include/linux/init_task.h
+++ linux-2.6/include/linux/init_task.h
@@ -133,9 +133,10 @@ extern struct group_info init_groups;
.nr_cpus_allowed = NR_CPUS, \
.mm = NULL, \
.active_mm = &init_mm, \
- .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .rt = { \
+ .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
+ .time_slice = HZ, }, \
.ioprio = 0, \
- .time_slice = HZ, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
.ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
Index: linux-2.6/include/linux/sched.h
===================================================================
--- linux-2.6.orig/include/linux/sched.h
+++ linux-2.6/include/linux/sched.h
@@ -934,6 +934,11 @@ struct sched_entity {
#endif
};

+struct sched_rt_entity {
+ struct list_head run_list;
+ unsigned int time_slice;
+};
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -950,9 +955,9 @@ struct task_struct {
#endif

int prio, static_prio, normal_prio;
- struct list_head run_list;
const struct sched_class *sched_class;
struct sched_entity se;
+ struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
@@ -977,7 +982,6 @@ struct task_struct {
unsigned int policy;
cpumask_t cpus_allowed;
int nr_cpus_allowed;
- unsigned int time_slice;

#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
Index: linux-2.6/kernel/sched_rt.c
===================================================================
--- linux-2.6.orig/kernel/sched_rt.c
+++ linux-2.6/kernel/sched_rt.c
@@ -111,7 +111,7 @@ static void enqueue_task_rt(struct rq *r
{
struct rt_prio_array *array = &rq->rt.active;

- list_add_tail(&p->run_list, array->queue + p->prio);
+ list_add_tail(&p->rt.run_list, array->queue + p->prio);
__set_bit(p->prio, array->bitmap);
inc_cpu_load(rq, p->se.load.weight);

@@ -127,7 +127,7 @@ static void dequeue_task_rt(struct rq *r

update_curr_rt(rq);

- list_del(&p->run_list);
+ list_del(&p->rt.run_list);
if (list_empty(array->queue + p->prio))
__clear_bit(p->prio, array->bitmap);
dec_cpu_load(rq, p->se.load.weight);
@@ -143,7 +143,7 @@ static void requeue_task_rt(struct rq *r
{
struct rt_prio_array *array = &rq->rt.active;

- list_move_tail(&p->run_list, array->queue + p->prio);
+ list_move_tail(&p->rt.run_list, array->queue + p->prio);
}

static void
@@ -212,7 +212,7 @@ static struct task_struct *pick_next_tas
return NULL;

queue = array->queue + idx;
- next = list_entry(queue->next, struct task_struct, run_list);
+ next = list_entry(queue->next, struct task_struct, rt.run_list);

next->se.exec_start = rq->clock;

@@ -261,14 +261,14 @@ static struct task_struct *pick_next_hig
queue = array->queue + idx;
BUG_ON(list_empty(queue));

- next = list_entry(queue->next, struct task_struct, run_list);
+ next = list_entry(queue->next, struct task_struct, rt.run_list);
if (unlikely(pick_rt_task(rq, next, cpu)))
goto out;

if (queue->next->next != queue) {
/* same prio task */
next = list_entry(queue->next->next, struct task_struct,
- run_list);
+ rt.run_list);
if (pick_rt_task(rq, next, cpu))
goto out;
}
@@ -282,7 +282,7 @@ static struct task_struct *pick_next_hig
queue = array->queue + idx;
BUG_ON(list_empty(queue));

- list_for_each_entry(next, queue, run_list) {
+ list_for_each_entry(next, queue, rt.run_list) {
if (pick_rt_task(rq, next, cpu))
goto out;
}
@@ -846,16 +846,16 @@ static void task_tick_rt(struct rq *rq,
if (p->policy != SCHED_RR)
return;

- if (--p->time_slice)
+ if (--p->rt.time_slice)
return;

- p->time_slice = DEF_TIMESLICE;
+ p->rt.time_slice = DEF_TIMESLICE;

/*
* Requeue to the end of queue if we are not the only element
* on the queue:
*/
- if (p->run_list.prev != p->run_list.next) {
+ if (p->rt.run_list.prev != p->rt.run_list.next) {
requeue_task_rt(rq, p);
set_tsk_need_resched(p);
}
Index: linux-2.6/mm/oom_kill.c
===================================================================
--- linux-2.6.orig/mm/oom_kill.c
+++ linux-2.6/mm/oom_kill.c
@@ -286,7 +286,7 @@ static void __oom_kill_task(struct task_
* all the memory it needs. That way it should be able to
* exit() and clear out its resources quickly...
*/
- p->time_slice = HZ;
+ p->rt.time_slice = HZ;
set_tsk_thread_flag(p, TIF_MEMDIE);

force_sig(SIGKILL, p);
Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -1685,7 +1685,7 @@ static void __sched_fork(struct task_str
p->se.wait_max = 0;
#endif

- INIT_LIST_HEAD(&p->run_list);
+ INIT_LIST_HEAD(&p->rt.run_list);
p->se.on_rq = 0;

#ifdef CONFIG_PREEMPT_NOTIFIERS

--
