[PATCH v5 11/14] sched/fair: Introduce an energy estimation helper function
From: Quentin Perret
Date: Tue Jul 24 2018 - 08:26:24 EST
In preparation for the definition of an energy-aware wakeup path,
introduce a helper function to estimate the consequences on system energy
when a specific task wakes up on a specific CPU. compute_energy()
estimates the capacity state to be reached by all frequency domains and
the energy consumed by each online CPU according to its Energy Model and
its percentage of busy time.
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Quentin Perret <quentin.perret@xxxxxxx>
---
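Notes:

    For reviewers who haven't looked at the Energy Model side of the
    series: the per-domain estimate returned by em_fd_energy() is
    expected to boil down to something like the sketch below. This is
    only an illustration of the math, not the actual EM code; the
    structure and field names (cap_state_sketch, cap, cost, scale_cpu)
    are made up for the example.

	/*
	 * One entry per capacity state (OPP) of a frequency domain.
	 * 'cost' is assumed to be pre-computed by the Energy Model,
	 * e.g. as power * max_freq / freq for that OPP.
	 */
	struct cap_state_sketch {
		unsigned long cap;	/* compute capacity at this OPP */
		unsigned long cost;	/* energy cost coefficient at this OPP */
	};

	static unsigned long fd_energy_sketch(struct cap_state_sketch *table,
					      int nr_states,
					      unsigned long max_util,
					      unsigned long sum_util,
					      unsigned long scale_cpu)
	{
		struct cap_state_sketch *cs = &table[nr_states - 1];
		int i;

		/* Lowest capacity state able to serve the biggest request. */
		for (i = 0; i < nr_states; i++) {
			if (table[i].cap >= max_util) {
				cs = &table[i];
				break;
			}
		}

		/* Scale the cost by the domain's total busy-time ratio. */
		return cs->cost * sum_util / scale_cpu;
	}

    In other words, max_util selects the capacity state the whole domain
    is expected to run at, and sum_util approximates how long the CPUs
    of the domain will be busy at that capacity state.
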
kernel/sched/fair.c | 77 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 77 insertions(+)
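
    compute_energy() is not wired up to the wake-up path yet in this
    patch. The expected call pattern from the wake-up path, added later
    in the series, is roughly the following; the variable names here
    (fd_list, candidate_cpu, best_cpu) are placeholders, not the actual
    implementation:

	/*
	 * Compare the estimated system energy of leaving @p on prev_cpu
	 * with the energy of migrating it to a candidate CPU, walking
	 * the root domain's list of frequency domains in both cases.
	 */
	prev_energy = compute_energy(p, prev_cpu, fd_list);
	candidate_energy = compute_energy(p, candidate_cpu, fd_list);
	if (candidate_energy < prev_energy)
		best_cpu = candidate_cpu;
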
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4aaa9132e840..dce2b1160cf4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6292,6 +6292,83 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
return min_cap * 1024 < task_util(p) * capacity_margin;
}
+/*
+ * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
+ * to @dst_cpu.
+ */
+static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
+{
+	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+	unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
+
+	/*
+	 * If @p migrates from @cpu to another, remove its contribution. Or,
+	 * if @p migrates from another CPU to @cpu, add its contribution. In
+	 * the other cases, @cpu is not impacted by the migration, so the
+	 * util_avg should already be correct.
+	 */
+	if (task_cpu(p) == cpu && dst_cpu != cpu)
+		util = max_t(long, util - task_util(p), 0);
+	else if (task_cpu(p) != cpu && dst_cpu == cpu)
+		util += task_util(p);
+
+	if (sched_feat(UTIL_EST)) {
+		util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
+
+		/*
+		 * During wake-up, the task isn't enqueued yet and doesn't
+		 * appear in the cfs_rq->avg.util_est.enqueued of any rq,
+		 * so just add it (if needed) to "simulate" what will be
+		 * cpu_util() after the task has been enqueued.
+		 */
+		if (dst_cpu == cpu)
+			util_est += _task_util_est(p);
+
+		util = max(util, util_est);
+	}
+
+	return min_t(unsigned long, util, capacity_orig_of(cpu));
+}
+
+/*
+ * compute_energy(): Estimates the energy that would be consumed if @p was
+ * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
+ * landscape of the CPUs after the task migration, and uses the Energy Model
+ * to compute what would be the energy if we decided to actually migrate that
+ * task.
+ */
+static long compute_energy(struct task_struct *p, int dst_cpu,
+			   struct freq_domain *fd)
+{
+	long util, max_util, sum_util, energy = 0;
+	int cpu;
+
+	while (fd) {
+		max_util = sum_util = 0;
+		/*
+		 * The frequency of CPUs of the current rd can be driven by
+		 * CPUs of another rd if they belong to the same frequency
+		 * domain. So, account for the utilization of these CPUs too
+		 * by masking fd with cpu_online_mask instead of the rd span.
+		 *
+		 * If an entire frequency domain is outside of the current rd,
+		 * it will not appear in its fd list and will not be accounted
+		 * by compute_energy().
+		 */
+		for_each_cpu_and(cpu, freq_domain_span(fd), cpu_online_mask) {
+			util = cpu_util_next(cpu, p, dst_cpu);
+			util = schedutil_freq_util(cpu, util, energy_util);
+			max_util = max(util, max_util);
+			sum_util += util;
+		}
+
+		energy += em_fd_energy(fd->obj, max_util, sum_util);
+		fd = fd->next;
+	}
+
+	return energy;
+}
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
--
2.18.0