On Wed, Feb 04, 2015 at 06:31:19PM +0000, Morten Rasmussen wrote:
+++ b/kernel/sched/fair.c
@@ -7216,6 +7216,37 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
+	if (env->use_ea) {
+		struct rq *costliest = NULL;
+		unsigned long costliest_usage = 1024, costliest_energy = 1;
+
+		for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
+			unsigned long usage = get_cpu_usage(i);
+			struct rq *rq = cpu_rq(i);
+			struct sched_domain *sd = rcu_dereference(rq->sd);
+			struct energy_env eenv = {
+				.sg_top = sd->groups,
+				.usage_delta = 0,
+				.src_cpu = -1,
+				.dst_cpu = -1,
+			};
+			unsigned long energy = sched_group_energy(&eenv);
+
+			/*
+			 * We're looking for the CPU with minimal efficiency
+			 * min(u_i / e_i); cross-multiplying to avoid a
+			 * division gives u_i * e_j < u_j * e_i, with j the
+			 * previous minimum.
+			 */
+			if (usage * costliest_energy < costliest_usage * energy) {
+				costliest_usage = usage;
+				costliest_energy = energy;
+				costliest = rq;
+			}
+		}
+
+		return costliest;
+	}
+
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		unsigned long capacity, wl;
 		enum fbq_type rt;
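As a quick sanity check of the division-free comparison above (numbers
made up for illustration): with a previous minimum of u_j = 400,
e_j = 2 (ratio 200) and a candidate u_i = 100, e_i = 1 (ratio 100),
the code tests 100 * 2 < 400 * 1, which holds, so the candidate
correctly becomes the new minimum.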
So I've thought about parametrizing the whole load-balance path to
avoid duplication like this.

Irrespective of whether we balance on pure load or on some other
metric, the structure is the same; only the units plugged in differ.

I've not really spent much time on what it would look like, but I
think it would be a good avenue to investigate to avoid patches like
this.
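Roughly, something along these lines (a rough, untested sketch;
struct fbq_metric, the callback names, and their wiring are made up
for illustration, not an existing API):

	/* One "pick" rule per balancing mode; both are division free. */
	struct fbq_metric {
		/*
		 * Return true if the candidate (a, b) should replace the
		 * current best (best_a, best_b).
		 */
		bool (*pick)(unsigned long a, unsigned long b,
			     unsigned long best_a, unsigned long best_b);
	};

	/* Classic load balancing: maximize wl_i / capacity_i. */
	static bool fbq_pick_load(unsigned long wl, unsigned long capacity,
				  unsigned long busiest_load,
				  unsigned long busiest_capacity)
	{
		return wl * busiest_capacity > busiest_load * capacity;
	}

	/* Energy-aware balancing: minimize u_i / e_i. */
	static bool fbq_pick_energy(unsigned long usage, unsigned long energy,
				    unsigned long costliest_usage,
				    unsigned long costliest_energy)
	{
		return usage * costliest_energy < costliest_usage * energy;
	}

find_busiest_queue() would then keep a single loop over the group,
compute the (value, unit) pair for the current mode, and call
env->metric->pick() instead of open-coding two nearly identical
loops.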