[PATCH v2 5/7] sched/fair: Employ capacity_greater() throughout load_balance()
From: Valentin Schneider
Date: Fri Feb 19 2021 - 08:05:08 EST
Employ the freshly introduced capacity_greater() helper for the capacity
comparisons in the load_balance() path. While at it, replace
group_smaller_{min,max}_cpu_capacity() with comparisons of the source group's
min/max capacity against the destination CPU's capacity.
Reviewed-by: Qais Yousef <qais.yousef@xxxxxxx>
Signed-off-by: Valentin Schneider <valentin.schneider@xxxxxxx>
---
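For reference, capacity_greater() is the CPU capacity comparison helper
introduced earlier in this series, the counterpart of fits_capacity(). Below
is a minimal, stand-alone user-space sketch of the two macros' semantics; the
margin constants mirror the mainline definitions and the capacity values
(1024/446) are made up for illustration, so treat both as assumptions rather
than as part of this patch:

/* Stand-alone sketch (not kernel code) of the two comparison helpers. */
#include <stdio.h>

/* "cap" fits in "max" only if it leaves ~20% headroom (assumed constants) */
#define fits_capacity(cap, max)		((cap) * 1280 < (max) * 1024)
/* "cap1" is noticeably (~5%) greater than "cap2" (assumed constants) */
#define capacity_greater(cap1, cap2)	((cap1) * 1024 > (cap2) * 1078)

int main(void)
{
	unsigned long big = 1024, little = 446;	/* illustrative capacities */

	/* Old-style check, as in group_smaller_max_cpu_capacity(): */
	printf("fits_capacity(little, big):    %d\n", fits_capacity(little, big));	/* 1 */

	/* New-style checks used by this patch: */
	printf("capacity_greater(big, little): %d\n", capacity_greater(big, little));	/* 1 */
	printf("capacity_greater(big, big):    %d\n", capacity_greater(big, big));	/* 0 */
	printf("capacity_greater(little, big): %d\n", capacity_greater(little, big));	/* 0 */

	return 0;
}

With the constants above, the former group_smaller_{min,max}_cpu_capacity()
checks now compare the source group's min/max capacity directly against the
destination CPU's capacity_of() and need roughly 5% of headroom instead of
roughly 20%, while find_busiest_queue()'s plain '<' comparison gains a ~5%
margin.
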
kernel/sched/fair.c | 33 ++++-----------------------------
1 file changed, 4 insertions(+), 29 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cc16d0e0b9fb..af5ce083c982 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8320,26 +8320,6 @@ group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
 	return false;
 }
 
-/*
- * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
- * per-CPU capacity than sched_group ref.
- */
-static inline bool
-group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
-{
-	return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity);
-}
-
-/*
- * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
- * per-CPU capacity_orig than sched_group ref.
- */
-static inline bool
-group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
-{
-	return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity);
-}
-
 static inline enum
 group_type group_classify(unsigned int imbalance_pct,
 			  struct sched_group *group,
@@ -8491,15 +8471,10 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	if (!sgs->sum_h_nr_running)
 		return false;
 
-	/*
-	 * Don't try to pull misfit tasks we can't help.
-	 * We can use max_capacity here as reduction in capacity on some
-	 * CPUs in the group should either be possible to resolve
-	 * internally or be covered by avg_load imbalance (eventually).
-	 */
+	/* Don't try to pull misfit tasks we can't help */
 	if (static_branch_unlikely(&sched_asym_cpucapacity) &&
 	    sgs->group_type == group_misfit_task &&
-	    (!group_smaller_max_cpu_capacity(sg, sds->local) ||
+	    (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
 	     sds->local_stat.group_type != group_has_spare))
 		return false;
 
@@ -8583,7 +8558,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 */
 	if (sd_has_asym_cpucapacity(env->sd) &&
 	    (sgs->group_type <= group_fully_busy) &&
-	    (group_smaller_min_cpu_capacity(sds->local, sg)))
+	    (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
 		return false;
 
 	return true;
@@ -9396,7 +9371,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 * average load.
 		 */
 		if (sd_has_asym_cpucapacity(env->sd) &&
-		    capacity_of(env->dst_cpu) < capacity &&
+		    !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
 		    nr_running == 1)
 			continue;
 
--
2.27.0