[PATCH 1/5] sched/fair: ignore SIS_UTIL when has idle core

From: Abel Wu
Date: Tue Jul 12 2022 - 04:22:20 EST


When SIS_UTIL is enabled, the SIS domain scan will be skipped if
the LLC is overloaded. Since the overloaded status is only updated
during load balancing at the LLC level, its refresh interval is
llc_size milliseconds. That window can be long enough to hurt
overall system throughput if idle cores stay out of reach of the
SIS domain scan.
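
As a rough userspace-only illustration of the staleness in question
(not kernel code; the llc_size values and the "balance interval is
about llc_size milliseconds" relation below are assumptions taken
from the description above):

  #include <stdio.h>

  /*
   * SIS_UTIL's view of whether the LLC is overloaded is refreshed by
   * periodic load balancing, roughly every llc_size milliseconds.
   * Between refreshes, cores that have just gone idle can be missed
   * by the SIS domain scan.
   */
  int main(void)
  {
          /* Hypothetical LLC sizes (CPUs sharing a last-level cache). */
          const int llc_sizes[] = { 8, 16, 32, 64 };
          const int n = sizeof(llc_sizes) / sizeof(llc_sizes[0]);

          for (int i = 0; i < n; i++)
                  printf("llc_size=%2d -> overloaded status may be up to ~%d ms stale\n",
                         llc_sizes[i], llc_sizes[i]);
          return 0;
  }

With has_idle_core set, the change below bypasses that throttling and
scans for the idle core regardless of the possibly stale overloaded
status.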

Signed-off-by: Abel Wu <wuyun.abel@xxxxxxxxxxxxx>
---
kernel/sched/fair.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a78d2e3b9d49..cd758b3616bd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6392,16 +6392,19 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
struct sched_domain *this_sd;
u64 time = 0;

- this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
- if (!this_sd)
- return -1;
-
cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

- if (sched_feat(SIS_PROP) && !has_idle_core) {
+ if (has_idle_core)
+ goto scan;
+
+ if (sched_feat(SIS_PROP)) {
u64 avg_cost, avg_idle, span_avg;
unsigned long now = jiffies;

+ this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+ if (!this_sd)
+ return -1;
+
/*
* If we're busy, the assumption that the last idle period
* predicts the future is flawed; age away the remaining
@@ -6436,7 +6439,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
return -1;
}
}
-
+scan:
for_each_cpu_wrap(cpu, cpus, target + 1) {
if (has_idle_core) {
i = select_idle_core(p, cpu, cpus, &idle_cpu);
--
2.31.1