[PATCH 3/3] sched/fair: Consider uclamp for "task fits capacity" checks

From: Valentin Schneider
Date: Wed Nov 20 2019 - 12:57:42 EST


task_fits_capacity() drives CPU selection at wakeup time and is also used
to detect misfit tasks. Right now it does this by comparing task_util_est()
against a CPU's capacity, but it doesn't take uclamp restrictions into
account.

There are a few interesting uses that can come out of doing this. For
instance, a low uclamp.max value could prevent certain tasks from being
flagged as misfit tasks, so they could merrily remain on low-capacity CPUs.
Similarly, a high uclamp.min value would steer tasks towards high-capacity
CPUs at wakeup (and, should that fail, steer them there later via misfit
balancing), so such "boosted" tasks would favor CPUs of higher capacity.

Introduce uclamp_task_util() and make task_fits_capacity() use it. To keep
things consistent between overutilized wakeup CPU selection (wake_cap())
and !overutilized wakeup CPU selection (find_energy_efficient_cpu()),
add a task_fits_capacity() check to find_energy_efficient_cpu().

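As a concrete (illustrative) example: a task with task_util_est() == 200
but an effective uclamp.min of 768 now reports a clamped utilization of
768. With the ~20% margin used by fits_capacity(), that fails against a
CPU of capacity 512 (768 * 1280 >= 512 * 1024) but passes against one of
capacity 1024, so the task gets steered towards, or misfit-migrated to,
the bigger CPU.
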
Signed-off-by: Valentin Schneider <valentin.schneider@xxxxxxx>
---
 kernel/sched/fair.c  | 11 ++++++++++-
 kernel/sched/sched.h | 13 +++++++++++++
 2 files changed, 23 insertions(+), 1 deletion(-)

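Not for the changelog: below is a minimal, compile-anywhere C sketch of
the new fit check, using made-up capacities and uclamp values. The
clamp() and fits_capacity() macros are local stand-ins that mirror the
kernel helpers this patch touches (including fits_capacity()'s ~20%
headroom), and the clamp bounds are passed explicitly in place of
uclamp_eff_value().

#include <stdio.h>

/* Local stand-in for the kernel's clamp(). */
#define clamp(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))
/* Local stand-in for fits_capacity(): util must leave ~20% headroom. */
#define fits_capacity(util, cap) ((util) * 1280UL < (cap) * 1024UL)

/* Mirrors the patch's uclamp_task_util(), bounds passed explicitly. */
static unsigned long uclamp_task_util(unsigned long util,
				      unsigned long uc_min,
				      unsigned long uc_max)
{
	return clamp(util, uc_min, uc_max);
}

int main(void)
{
	unsigned long util_est = 200;			/* task_util_est() */
	unsigned long uc_min = 768, uc_max = 1024;	/* uclamp.{min,max} */
	unsigned long little = 512, big = 1024;		/* CPU capacities */
	unsigned long util = uclamp_task_util(util_est, uc_min, uc_max);

	printf("raw fit on little:     %d\n", fits_capacity(util_est, little));
	printf("clamped fit on little: %d\n", fits_capacity(util, little));
	printf("clamped fit on big:    %d\n", fits_capacity(util, big));
	return 0;
}

This prints 1/0/1: the raw estimate fits the little CPU, the clamped
value does not, and the big CPU satisfies the boosted task.
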
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 08a233e97a01..446409252b23 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3822,7 +3822,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 
 static inline int task_fits_capacity(struct task_struct *p, long capacity)
 {
-	return fits_capacity(task_util_est(p), capacity);
+	return fits_capacity(uclamp_task_util(p, task_util_est(p)), capacity);
 }
 
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
@@ -6274,6 +6274,15 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 			if (!fits_capacity(util, cpu_cap))
 				continue;
 
+			/*
+			 * Skip CPUs that don't satisfy uclamp requests. Note
+			 * that the above already ensures the CPU has enough
+			 * spare capacity for the task; this is only really for
+			 * uclamp restrictions.
+			 */
+			if (!task_fits_capacity(p, capacity_orig_of(cpu)))
+				continue;
+
 			/* Always use prev_cpu as a candidate. */
 			if (cpu == prev_cpu) {
 				prev_delta = compute_energy(p, prev_cpu, pd);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 900328c4eeef..74bf08ec1a01 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2329,6 +2329,14 @@ static inline unsigned long uclamp_rq_util(struct rq *rq, unsigned long util)
 {
 	return uclamp_rq_util_with(rq, util, NULL);
 }
+
+static inline
+unsigned long uclamp_task_util(struct task_struct *p, unsigned long util)
+{
+	return clamp(util,
+		     (unsigned long)uclamp_eff_value(p, UCLAMP_MIN),
+		     (unsigned long)uclamp_eff_value(p, UCLAMP_MAX));
+}
 #else /* CONFIG_UCLAMP_TASK */
 static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
 						struct task_struct *p)
@@ -2339,6 +2347,11 @@ static inline unsigned long uclamp_rq_util(struct rq *rq, unsigned long util)
 {
 	return util;
 }
+static inline
+unsigned long uclamp_task_util(struct task_struct *p, unsigned long util)
+{
+	return util;
+}
 #endif /* CONFIG_UCLAMP_TASK */
 
 #ifdef arch_scale_freq_capacity
--
2.22.0