[tip: sched/core] sched/numa: Use runnable_avg to classify node
From: tip-bot2 for Vincent Guittot
Date: Tue Sep 29 2020 - 03:57:18 EST
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 8e0e0eda6a13e242239799263cc354796c05434a
Gitweb: https://git.kernel.org/tip/8e0e0eda6a13e242239799263cc354796c05434a
Author: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
AuthorDate: Mon, 21 Sep 2020 09:29:59 +02:00
Committer: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Fri, 25 Sep 2020 14:23:24 +02:00
sched/numa: Use runnable_avg to classify node
Use runnable_avg to classify the NUMA node state in the same way as the
normal load balancer does. This helps ensure that the NUMA and normal
balancers use the same view of the state of the system.
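For readers outside the patch context, below is a minimal standalone
sketch of the resulting classification rule. ns_sketch, classify() and
the NODE_* names are illustrative stand-ins for the kernel's
struct numa_stats, numa_classify() and enum numa_type; the comparisons
mirror the hunks in the diff below.

/*
 * Sketch of node classification after this patch: a node is
 * overloaded when it has more tasks than CPUs *and* either its
 * utilization or its runnable_avg exceeds capacity scaled by
 * imbalance_pct.
 */
struct ns_sketch {
	unsigned long runnable;		/* sum of cpu_runnable() */
	unsigned long util;		/* sum of cpu_util() */
	unsigned long compute_capacity;	/* sum of capacity_of() */
	unsigned int nr_running;	/* sum of rq->cfs.h_nr_running */
	unsigned int weight;		/* nr of CPUs on the node */
};

enum node_state { NODE_HAS_SPARE, NODE_FULLY_BUSY, NODE_OVERLOADED };

static enum node_state classify(unsigned int imbalance_pct,
				const struct ns_sketch *ns)
{
	/* More tasks than CPUs, and util or runnable over capacity */
	if (ns->nr_running > ns->weight &&
	    ((ns->compute_capacity * 100 < ns->util * imbalance_pct) ||
	     (ns->compute_capacity * imbalance_pct < ns->runnable * 100)))
		return NODE_OVERLOADED;

	/* Fewer tasks than CPUs, or util and runnable under capacity */
	if (ns->nr_running < ns->weight ||
	    ((ns->compute_capacity * 100 > ns->util * imbalance_pct) &&
	     (ns->compute_capacity * imbalance_pct > ns->runnable * 100)))
		return NODE_HAS_SPARE;

	return NODE_FULLY_BUSY;
}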
Large arm64 system: 2 nodes / 224 CPUs:
hackbench -l (256000/#grp) -g #grp
grp    tip/sched/core       +patchset            improvement
1      14.008(+/- 4.99 %)   13.800(+/- 3.88 %)    1.48 %
4       4.340(+/- 5.35 %)    4.283(+/- 4.85 %)    1.33 %
16      3.357(+/- 0.55 %)    3.359(+/- 0.54 %)   -0.06 %
32      3.050(+/- 0.94 %)    3.039(+/- 1.06 %)    0.38 %
64      2.968(+/- 1.85 %)    3.006(+/- 2.92 %)   -1.27 %
128     3.290(+/-12.61 %)    3.108(+/- 5.97 %)    5.51 %
256     3.235(+/- 3.95 %)    3.188(+/- 2.83 %)    1.45 %
Signed-off-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Reviewed-by: Mel Gorman <mgorman@xxxxxxx>
Link: https://lkml.kernel.org/r/20200921072959.16317-1-vincent.guittot@xxxxxxxxxx
---
kernel/sched/fair.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 33699db..a15deb2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1504,6 +1504,7 @@ enum numa_type {
/* Cached statistics for all CPUs within a node */
struct numa_stats {
unsigned long load;
+ unsigned long runnable;
unsigned long util;
/* Total compute capacity of CPUs on a node */
unsigned long compute_capacity;
@@ -1547,6 +1548,7 @@ struct task_numa_env {
};
static unsigned long cpu_load(struct rq *rq);
+static unsigned long cpu_runnable(struct rq *rq);
static unsigned long cpu_util(int cpu);
static inline long adjust_numa_imbalance(int imbalance, int src_nr_running);
@@ -1555,11 +1557,13 @@ numa_type numa_classify(unsigned int imbalance_pct,
struct numa_stats *ns)
{
if ((ns->nr_running > ns->weight) &&
- ((ns->compute_capacity * 100) < (ns->util * imbalance_pct)))
+ (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
+ ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
return node_overloaded;
if ((ns->nr_running < ns->weight) ||
- ((ns->compute_capacity * 100) > (ns->util * imbalance_pct)))
+ (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
+ ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
return node_has_spare;
return node_fully_busy;
@@ -1610,6 +1614,7 @@ static void update_numa_stats(struct task_numa_env *env,
struct rq *rq = cpu_rq(cpu);
ns->load += cpu_load(rq);
+ ns->runnable += cpu_runnable(rq);
ns->util += cpu_util(cpu);
ns->nr_running += rq->cfs.h_nr_running;
ns->compute_capacity += capacity_of(cpu);
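A worked example against the classify() sketch above (the figures are
made up; task_numa_migrate() passes an imbalance_pct of 112 for NUMA
balancing at the time of this patch, but treat that value as an
assumption here):

struct ns_sketch ns = {
	.runnable         = 4800,	/* heavy runnable contention */
	.util             = 3600,	/* utilization looks moderate */
	.compute_capacity = 4096,	/* e.g. 4 CPUs x 1024 */
	.nr_running       = 6,
	.weight           = 4,
};

/*
 * classify(112, &ns) returns NODE_OVERLOADED: the new runnable check
 * fires (4096 * 112 = 458752 < 4800 * 100 = 480000) even though the
 * util check does not (4096 * 100 = 409600 >= 3600 * 112 = 403200).
 * Before this patch the same node would have been classified as
 * node_has_spare, which is exactly the misclassification the
 * runnable_avg term catches.
 */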