[RFC][PATCH 14/14] sched: cleanup wake_idle

From: Peter Zijlstra
Date: Thu Sep 03 2009 - 09:25:22 EST


A more readable version, with a few differences:

- don't check against the root domain, but instead check SD_LOAD_BALANCE

- don't re-iterate the cpus already covered by the previous (child) SD
  (see the sketch below)

- use rcu_read_lock() around the sd iteration
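
As a rough illustration of the child-span skip (a stand-alone user-space
sketch, not kernel code; domain_span[], cpus_allowed and idle_cpu() below
are made-up stand-ins for sched_domain_span(), p->cpus_allowed and the
real idle_cpu()):

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8

/* Hypothetical nested domain spans: SMT pair, package, whole machine. */
static const uint32_t domain_span[] = { 0x03, 0x0f, 0xff };
static const int nr_domains = 3;

/* Pretend CPUs 0-2 are busy and the rest are idle. */
static int idle_cpu(int cpu) { return cpu > 2; }

static int wake_idle_sketch(int cpu, uint32_t cpus_allowed)
{
	uint32_t child = 0;	/* span already scanned at the previous level */
	int level, i;

	/* Walk the domains from narrow to wide. */
	for (level = 0; level < nr_domains; level++) {
		uint32_t span = domain_span[level] & cpus_allowed;

		for (i = 0; i < NR_CPUS; i++) {
			if (!(span & (1u << i)))
				continue;
			if (child & (1u << i))	/* already scanned at the child level */
				continue;
			if (idle_cpu(i))
				return i;
		}
		child = domain_span[level];
	}
	return cpu;	/* nothing better found */
}

int main(void)
{
	printf("wake CPU %d\n", wake_idle_sketch(0, 0xff));
	return 0;
}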

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
---
kernel/sched_fair.c | 45 +++++++++++++++++++++++++--------------------
1 file changed, 25 insertions(+), 20 deletions(-)

Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
+++ linux-2.6/kernel/sched_fair.c
@@ -1053,15 +1053,10 @@ static void yield_task_fair(struct rq *r
* not idle and an idle cpu is available. The span of cpus to
* search starts with cpus closest then further out as needed,
* so we always favor a closer, idle cpu.
- * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (rq->rd->online)
*
* Returns the CPU we should wake onto.
*/
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-
-#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)
-
/*
* At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
* are idle and this is not a kernel thread and this task's affinity
@@ -1099,7 +1094,7 @@ static int wake_idle_power_save(int cpu,
static int wake_idle(int cpu, struct task_struct *p)
{
struct rq *task_rq = task_rq(p);
- struct sched_domain *sd;
+ struct sched_domain *sd, *child = NULL;
int i;

i = wake_idle_power_save(cpu, p);
@@ -1118,24 +1113,34 @@ static int wake_idle(int cpu, struct tas
if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
return cpu;

+ rcu_read_lock();
for_each_domain(cpu, sd) {
- if ((sd->flags & SD_WAKE_IDLE)
- || ((sd->flags & SD_WAKE_IDLE_FAR)
- && !task_hot(p, task_rq->clock, sd))) {
- for_each_cpu_and(i, sched_domain_span(sd),
- &p->cpus_allowed) {
- if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
- if (i != task_cpu(p)) {
- schedstat_inc(p,
- se.nr_wakeups_idle);
- }
- return i;
- }
- }
- } else {
+ if (!(sd->flags & SD_LOAD_BALANCE))
break;
+
+ if (!(sd->flags & SD_WAKE_IDLE) &&
+ (task_hot(p, task_rq->clock, sd) || !(sd->flags & SD_WAKE_IDLE_FAR)))
+ break;
+
+ for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+ if (child && cpumask_test_cpu(i, sched_domain_span(child)))
+ continue;
+
+ if (!idle_cpu(i))
+ continue;
+
+ if (task_cpu(p) != i)
+ schedstat_inc(p, se.nr_wakeups_idle);
+
+ cpu = i;
+ goto unlock;
}
+
+ child = sd;
}
+unlock:
+ rcu_read_unlock();
+
return cpu;
}
#else /* !ARCH_HAS_SCHED_WAKE_IDLE*/

--
