[PATCH 15/30] sched: fix newidle smp group balancing

From: Peter Zijlstra
Date: Fri Jun 27 2008 - 08:00:01 EST


Re-compute the shares on newidle balancing, so that the balancing
decision is based on recent data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
---
kernel/sched.c | 13 +++++++++++++
1 file changed, 13 insertions(+)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -1579,6 +1579,13 @@ static void update_shares(struct sched_d
walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
}

+static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+ spin_unlock(&rq->lock);
+ update_shares(sd);
+ spin_lock(&rq->lock);
+}
+
static void update_h_load(int cpu)
{
walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
@@ -1595,6 +1602,10 @@ static inline void update_shares(struct
{
}

+static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+}
+
#endif

#endif
@@ -3543,6 +3554,7 @@ load_balance_newidle(int this_cpu, struc

schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
redo:
+ update_shares_locked(this_rq, sd);
group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
&sd_idle, cpus, NULL);
if (!group) {
@@ -3586,6 +3598,7 @@ redo:
} else
sd->nr_balance_failed = 0;

+ update_shares_locked(this_rq, sd);
return ld_moved;

out_balanced:

--

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/