[RFC/PATCH 05/17] sched: rt-group: optimize dequeue_rt_stack

From: Peter Zijlstra
Date: Sun Mar 09 2008 - 13:12:37 EST


Now that the group hierarchy can have an arbitrary depth, the O(n^2) nature
of RT task dequeues (where n is the depth of the hierarchy) will really hurt.
Optimize this by providing each entity with space to store the tree path, so
we can walk it back down the other way.
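
As a stand-alone illustration of the technique (a minimal sketch; struct
entity, dequeue_one() and dequeue_top_down() are made-up stand-ins for the
kernel types, not part of this patch): the hierarchy only carries parent
pointers, so walk it bottom-up once, threading a ->back pointer as you go,
then follow that chain to visit the entries top-down, one O(h) pass instead
of O(h^2):

struct entity {
        struct entity *parent;  /* link towards the root */
        struct entity *back;    /* scratch link towards the leaf */
};

static void dequeue_one(struct entity *e)
{
        /* stand-in for dequeue_rt_entity() */
}

static void dequeue_top_down(struct entity *leaf)
{
        struct entity *e, *back = NULL;

        /* One pass up, recording the path in ->back. */
        for (e = leaf; e; e = e->parent) {
                e->back = back;
                back = e;
        }

        /* 'back' now points at the topmost entity; replay the path down. */
        for (e = back; e; e = e->back)
                dequeue_one(e);
}

Storing the scratch pointer in the entity itself, rather than allocating a
variable-depth stack at dequeue time, is safe here because these walks run
with the runqueue lock held, so no two walks can race on the same entities.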

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
---
 include/linux/sched.h |    1 +
 kernel/sched_rt.c     |   29 +++++++++++++----------------
 2 files changed, 14 insertions(+), 16 deletions(-)

Index: linux-2.6-2/kernel/sched_rt.c
===================================================================
--- linux-2.6-2.orig/kernel/sched_rt.c
+++ linux-2.6-2/kernel/sched_rt.c
@@ -479,26 +479,23 @@ static void dequeue_rt_entity(struct sch
 /*
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
  */
 static void dequeue_rt_stack(struct task_struct *p)
 {
-        struct sched_rt_entity *rt_se, *top_se;
+        struct sched_rt_entity *rt_se, *back = NULL;
 
-        /*
-         * dequeue all, top - down.
-         */
-        do {
-                rt_se = &p->rt;
-                top_se = NULL;
-                for_each_sched_rt_entity(rt_se) {
-                        if (on_rt_rq(rt_se))
-                                top_se = rt_se;
-                }
-                if (top_se)
-                        dequeue_rt_entity(top_se);
-        } while (top_se);
+        /* Walk bottom - up once, recording the path in ->back. */
+        rt_se = &p->rt;
+        for_each_sched_rt_entity(rt_se) {
+                rt_se->back = back;
+                back = rt_se;
+        }
+
+        /* Replay the recorded path to dequeue top - down. */
+        for (rt_se = back; rt_se; rt_se = rt_se->back) {
+                if (on_rt_rq(rt_se))
+                        dequeue_rt_entity(rt_se);
+        }
 }
 
 /*
Index: linux-2.6-2/include/linux/sched.h
===================================================================
--- linux-2.6-2.orig/include/linux/sched.h
+++ linux-2.6-2/include/linux/sched.h
@@ -978,6 +978,7 @@ struct sched_rt_entity {
         unsigned long timeout;
         int nr_cpus_allowed;
 
+        struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
         struct sched_rt_entity *parent;
         /* rq on which this entity is (to be) queued: */

--
