[PATCH v1 10/19] sched: update wake_affine path to use u64, s64 for weights
From: Nikhil Rao
Date: Sun May 01 2011 - 21:21:50 EST
Convert the math in wake_affine() and effective_load() to s64/u64 so it
can handle the increased load-weight resolution without overflowing.
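
To make the overflow concrete, here is a minimal userspace sketch (not
kernel code; the weight values are illustrative assumptions, roughly a
nice-0 weight scaled up by ~10 extra bits of resolution):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* illustrative: higher-resolution weights, not kernel constants */
          int64_t wl     = 1024LL << 10;   /* per-entity weight delta */
          int64_t shares = 1024LL << 10;   /* tg->shares, also scaled up */

          /*
           * wl * shares is roughly 2^40, which no longer fits in a
           * 32-bit long.  Hence effective_load() computes in s64 and
           * divides with div64_s64() instead of native long division.
           */
          printf("wl * shares = %lld\n", (long long)(wl * shares));
          return 0;
  }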
Signed-off-by: Nikhil Rao <ncrao@xxxxxxxxxx>
---
kernel/sched_fair.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 49e1eeb..1e011b1 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1388,7 +1388,7 @@ static void task_waking_fair(struct rq *rq, struct task_struct *p)
* of group shares between cpus. Assuming the shares were perfectly aligned one
* can calculate the shift in shares.
*/
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
+static s64 effective_load(struct task_group *tg, int cpu, s64 wl, s64 wg)
{
struct sched_entity *se = tg->se[cpu];
@@ -1396,7 +1396,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
return wl;
for_each_sched_entity(se) {
- long lw, w;
+ s64 lw, w;
tg = se->my_q->tg;
w = se->my_q->load.weight;
@@ -1409,7 +1409,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
wl += w;
if (lw > 0 && wl < lw)
- wl = (wl * tg->shares) / lw;
+ wl = div64_s64(wl * tg->shares, lw);
else
wl = tg->shares;
@@ -1504,7 +1504,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
if (balanced ||
(this_load <= load &&
- this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
+ this_load + target_load(prev_cpu, idx) <= (u64)tl_per_task)) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
--
1.7.3.1
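
For reference, div64_s64() (declared in include/linux/math64.h) is the
kernel's signed 64-by-64 division helper, so 32-bit architectures do not
have to open-code a 64-bit '/' in this path.  A rough userspace sketch of
the new call site, with made-up weights (the stand-in helper below is only
an approximation of the real one):

  #include <stdio.h>
  #include <stdint.h>

  /* Stand-in for the kernel's div64_s64(); the real helper is in math64.h. */
  static int64_t div64_s64_sketch(int64_t dividend, int64_t divisor)
  {
          return dividend / divisor;
  }

  int main(void)
  {
          int64_t shares = 1024LL << 10;          /* assumed tg->shares   */
          int64_t wl     = (1024LL << 10) * 3;    /* assumed queue weight */
          int64_t lw     = (1024LL << 10) * 4;    /* assumed rq weight    */

          /* mirrors: wl = div64_s64(wl * tg->shares, lw) */
          printf("scaled share = %lld\n",
                 (long long)div64_s64_sketch(wl * shares, lw));
          return 0;
  }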