[PATCH 1/3] sched: limit "all_pinned" balance attempts to sd->span

From: Gregory Haskins
Date: Tue Jun 24 2008 - 10:16:59 EST


Load-balancing will never terminate if every attempt comes back
"all_pinned" under the current algorithm: the candidate mask starts as
CPU_MASK_ALL, so cpus outside the domain's span are never cleared and
the mask can never become empty. This is unlikely to occur in practice,
but we should protect against it nonetheless. Seed the mask with
sd->span instead, and bound the number of retries by the span's weight.

Signed-off-by: Gregory Haskins <ghaskins@xxxxxxxxxx>
---
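
For reference, a minimal user-space sketch of the bounded-retry idea
(not kernel code, not meant for merging): the candidate mask starts as
the domain's span and at most weight(span) - 1 retries are allowed, so
the loop terminates even when every attempt reports all_pinned. Here
cpumask_t is modelled as a plain bitmask, and pick_busiest()/popcount()
are stand-ins invented for this sketch, not real kernel helpers.

/*
 * Stand-alone model of the bounded retry in load_balance(): start from
 * the span, retry at most weight(span) - 1 times, terminate regardless.
 */
#include <stdio.h>

#define SPAN	0x0fUL		/* hypothetical 4-cpu domain: cpus 0-3 */

static int popcount(unsigned long m)	/* stand-in for cpus_weight() */
{
	int n = 0;

	for (; m; m &= m - 1)
		n++;
	return n;
}

/* stand-in for find_busiest_queue(): lowest set bit in the mask */
static int pick_busiest(unsigned long cpus)
{
	int cpu;

	for (cpu = 0; cpu < (int)(8 * sizeof(cpus)); cpu++)
		if (cpus & (1UL << cpu))
			return cpu;
	return -1;
}

int main(void)
{
	unsigned long cpus = SPAN;		/* was CPU_MASK_ALL before the patch */
	int remain = popcount(SPAN) - 1;	/* hard bound on retries */
	int all_pinned = 1;			/* worst case: every attempt is pinned */
	int busiest;

redo:
	busiest = pick_busiest(cpus);
	if (busiest < 0)
		return 0;

	printf("trying cpu %d, %d retries left\n", busiest, remain);

	if (all_pinned && remain) {
		cpus &= ~(1UL << busiest);	/* cpu_clear() equivalent */
		remain--;
		goto redo;
	}
	return 0;
}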

kernel/sched.c | 19 ++++++++++---------
1 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 31f91d9..54b27b4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3178,7 +3178,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
-	cpumask_t cpus = CPU_MASK_ALL;
+	cpumask_t cpus = sd->span;
+	int remain = cpus_weight(sd->span) - 1;
 	unsigned long flags;
 
 	/*
@@ -3237,11 +3238,10 @@ redo:
 			resched_cpu(this_cpu);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
-		if (unlikely(all_pinned)) {
+		if (unlikely(all_pinned && remain)) {
 			cpu_clear(cpu_of(busiest), cpus);
-			if (!cpus_empty(cpus))
-				goto redo;
-			goto out_balanced;
+			remain--;
+			goto redo;
 		}
 	}
 
@@ -3332,7 +3332,8 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int ld_moved = 0;
 	int sd_idle = 0;
 	int all_pinned = 0;
-	cpumask_t cpus = CPU_MASK_ALL;
+	cpumask_t cpus = sd->span;
+	int remain = cpus_weight(sd->span) - 1;
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -3375,10 +3376,10 @@ redo:
 					&all_pinned);
 		spin_unlock(&busiest->lock);
 
-		if (unlikely(all_pinned)) {
+		if (unlikely(all_pinned && remain)) {
 			cpu_clear(cpu_of(busiest), cpus);
-			if (!cpus_empty(cpus))
-				goto redo;
+			remain--;
+			goto redo;
 		}
 	}
 
