[PATCH v5 3/6] sched: Add per-cpu min capacity to sched_group_capacity

From: Morten Rasmussen
Date: Fri Oct 14 2016 - 09:43:15 EST


struct sched_group_capacity currently represents the compute capacity
sum of all cpus in the sched_group. Unless it is divided by the
group_weight to get the average capacity per cpu, it hides differences
in cpu capacity for mixed capacity systems (e.g. capacity reduced by
high RT/IRQ utilization, or ARM big.LITTLE). But even the average may
not be sufficient if the group covers cpus of different capacities.
Instead, extend struct sched_group_capacity to also track the minimum
per-cpu capacity in the group, so that a group suitable for a given
task utilization can more easily be found and cpus with reduced
capacity can be avoided for tasks with high utilization. The consumer
side is not implemented by this patch; a sketch of the intended use
follows below.

cc: Ingo Molnar <mingo@xxxxxxxxxx>
cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

Signed-off-by: Morten Rasmussen <morten.rasmussen@xxxxxxx>
---
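To illustrate the intended use of the new field (no consumer is added
by this patch), a wake-up or find-idlest-group style path could skip
groups whose weakest cpu cannot hold the task's utilization. The
helper name task_fits_group() and the ~80% fit threshold below are
placeholders for this sketch only; task_util() is assumed to be the
existing helper in kernel/sched/fair.c.

/*
 * Sketch only, not part of this patch: return true if the task's
 * utilization fits on the smallest cpu of the group with ~20%
 * headroom to spare.
 */
static inline bool task_fits_group(struct task_struct *p,
				   struct sched_group *sg)
{
	/*
	 * Compare against the smallest per-cpu capacity in the group
	 * rather than the group average, so that one big cpu cannot
	 * hide several little ones.
	 */
	return task_util(p) * 1280 < sg->sgc->min_capacity * 1024;
}

Checking sgc->min_capacity instead of sgc->capacity / group_weight
matters exactly when a group mixes cpus of different capacities, which
is the case described in the changelog above.
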
kernel/sched/core.c | 3 ++-
kernel/sched/fair.c | 17 ++++++++++++-----
kernel/sched/sched.h | 3 ++-
3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aae08cedd75e..50526a20ab94 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5703,7 +5703,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		printk(KERN_CONT " %*pbl",
 		       cpumask_pr_args(sched_group_cpus(group)));
 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-			printk(KERN_CONT " (cpu_capacity = %d)",
+			printk(KERN_CONT " (cpu_capacity = %lu)",
 				group->sgc->capacity);
 		}

@@ -6180,6 +6180,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 * die on a /0 trap.
 		 */
 		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;

 		/*
 		 * Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cf84e37b3b4a..28e42cb41d7b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6905,13 +6905,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)

 	cpu_rq(cpu)->cpu_capacity = capacity;
 	sdg->sgc->capacity = capacity;
+	sdg->sgc->min_capacity = capacity;
 }

 void update_group_capacity(struct sched_domain *sd, int cpu)
 {
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
-	unsigned long capacity;
+	unsigned long capacity, min_capacity;
 	unsigned long interval;

 	interval = msecs_to_jiffies(sd->balance_interval);
@@ -6924,6 +6925,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 	}

 	capacity = 0;
+	min_capacity = ULONG_MAX;

 	if (child->flags & SD_OVERLAP) {
 		/*
@@ -6948,11 +6950,12 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 			 */
 			if (unlikely(!rq->sd)) {
 				capacity += capacity_of(cpu);
-				continue;
+			} else {
+				sgc = rq->sd->groups->sgc;
+				capacity += sgc->capacity;
 			}

-			sgc = rq->sd->groups->sgc;
-			capacity += sgc->capacity;
+			min_capacity = min(capacity_of(cpu), min_capacity);
 		}
 	} else {
 		/*
@@ -6962,12 +6965,16 @@ void update_group_capacity(struct sched_domain *sd, int cpu)

 		group = child->groups;
 		do {
-			capacity += group->sgc->capacity;
+			struct sched_group_capacity *sgc = group->sgc;
+
+			capacity += sgc->capacity;
+			min_capacity = min(sgc->min_capacity, min_capacity);
 			group = group->next;
 		} while (group != child->groups);
 	}

 	sdg->sgc->capacity = capacity;
+	sdg->sgc->min_capacity = min_capacity;
 }

 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 19b99869809d..b3b3ecbbb494 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -892,7 +892,8 @@ struct sched_group_capacity {
 	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
 	 * for a single CPU.
 	 */
-	unsigned int capacity;
+	unsigned long capacity;
+	unsigned long min_capacity; /* Min per-cpu capacity in group */
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */

--
2.7.4