Allow update_sg_lb_stats() to retrieve the group stats cached in
sg_lb_stats_prop saved by another CPU performing load balancing around
the same time (same jiffy).
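For reference, a minimal sketch of the layout assumed for the cached stats
hanging off sched_domain_shared::private is shown below; only the two fields
dereferenced by this patch are included, and the actual definition is
introduced earlier in the series:

  /* Sketch only: fields read by retrieve_cached_stats() below */
  struct sg_lb_stats_prop {
          unsigned long           last_update;    /* jiffy of the last cached update */
          struct sg_lb_stats      sg_stats;       /* stats saved by the updating CPU */
  };

  /* Reached via (struct sg_lb_stats_prop *)group->shared->private */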
The current implementation does not invalidate the cached stats, which has a
few limitations; most notably, stats reuse is limited to busy load balancing
since the stats can only be updated once per jiffy. Newidle balance can
happen frequently and concurrently on many CPUs, which could result in
readers seeing inconsistent values for the propagated stats.
For this iteration, the focus is on reducing the time taken by busy load
balancing, allowing the busy CPU to resume running the task as quickly as
possible.
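The smp_rmb() on the retrieval path below is assumed to pair with a
write-side publish along the following lines (a sketch only; the actual
update path that fills the cache is part of an earlier patch in the series):

  /* Writer side (sketch): cache the freshly computed group stats */
  lb_prop->sg_stats = *sgs;
  /*
   * Make the cached stats visible before the timestamp that readers
   * use to decide whether the cached copy is from the current jiffy.
   */
  smp_wmb();
  WRITE_ONCE(lb_prop->last_update, jiffies);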
Signed-off-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
---
kernel/sched/fair.c | 83 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 81 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 60517a732c10..3b402f294f0b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10275,6 +10275,75 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
return check_cpu_capacity(rq, sd);
}
+static inline int can_retrieve_stats(struct sched_domain *sd, enum cpu_idle_type idle)
+{
+ /*
+ * Only under periodic load balancing can we ensure that no concurrent
+ * CPU modifies the stats being propagated upwards:
+ * should_we_balance() can allow multiple newidle balances to progress
+ * concurrently, and an idle -> busy transition during idle balance
+ * requires the stats to be recomputed since the idleness metrics
+ * change with migration.
+ */
+ if (idle)
+ return 0;
+
+ /*
+ * If the individual groups are separate NUMA domains, migrations can
+ * cause the preferred task statistics to change and require the
+ * stats to be recomputed.
+ */
+ if (sd->child && (sd->child->flags & SD_NUMA))
+ return 0;
+
+ /*
+ * misfit_task_load requires recalculation on SD_ASYM_CPUCAPACITY
+ * domains. Skip caching stats for them.
+ */
+ if (sd->flags & SD_ASYM_CPUCAPACITY)
+ return 0;
+
+ /*
+ * TODO: For CPU_IDLE case, invalidate stats for an idle -> busy
+ * transition but for the time being, save some cycles during busy
+ * load balancing.
+ */
+ return 1;
+}
+
+static inline int retrieve_cached_stats(struct sched_group *group, struct sg_lb_stats *sg_stats)
+{
+ struct sched_domain_shared *sg_share = group->shared;
+ unsigned long current_jiffy = jiffies;
+ struct sg_lb_stats_prop *lb_prop;
+
+ if (!sg_share)
+ return 0;
+
+ lb_prop = (struct sg_lb_stats_prop *)sg_share->private;
+ if (!lb_prop)
+ return 0;
+
+ /* Stale stats */
+ if (READ_ONCE(lb_prop->last_update) != current_jiffy)
+ return 0;
+
+ /*
+ * Pairs with the update to sgs_prop->last_update to
+ * prevent readers from seeing an inconsistent value of
+ * the propagated stats from a concurrent update.
+ */
+ smp_rmb();
+ *sg_stats = lb_prop->sg_stats;
+
+ /*
+ * If the stats were read within the same jiffy, the
+ * reader cannot have observed an inconsistent state
+ * since the stats are only updated once per jiffy.
+ */
+ return time_before_eq(jiffies, current_jiffy);
+}
+
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
@@ -10292,10 +10361,19 @@ static inline void update_sg_lb_stats(struct lb_env *env,
int i, nr_running, local_group, sd_flags = env->sd->flags;
bool balancing_at_rd = !env->sd->parent;
- memset(sgs, 0, sizeof(*sgs));
-
local_group = group == sds->local;
+ /*
+ * If the stats can be retrieved, we are doing busy load balancing.
+ * Skip right ahead to group_classify() since group_asym_packing and
+ * group_smt_balance are not possible under busy load balancing.
+ */
+ if (can_retrieve_stats(env->sd, env->idle) &&
+ retrieve_cached_stats(group, sgs))
+ goto group_classify;
+
+ memset(sgs, 0, sizeof(*sgs));
+
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
struct rq *rq = cpu_rq(i);
unsigned long load = cpu_load(rq);
@@ -10360,6 +10438,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if (!local_group && smt_balance(env, sgs, group))
sgs->group_smt_balance = 1;
+group_classify:
sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
/* Computing avg_load makes sense only when group is overloaded */