[RFC PATCH 07/16] sched/fair: Fix forced idle sibling starvation corner case
From: Vineeth Remanan Pillai
Date: Tue Jun 30 2020 - 17:33:26 EST
From: vpillai <vpillai@xxxxxxxxxxxxxxxx>
If there is only one long-running local task and the sibling is
forced idle, the forced-idle sibling might not get a chance to run
until a schedule event happens on any CPU in the core.

So check for this condition during the tick and, if the sibling is
starved, give it a chance to schedule.
Signed-off-by: Vineeth Remanan Pillai <vpillai@xxxxxxxxxxxxxxxx>
Signed-off-by: Julien Desfossez <jdesfossez@xxxxxxxxxxxxxxxx>
---
kernel/sched/fair.c | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
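
As a quick aside on the accounting used here: below is a stand-alone
user-space sketch of the slice-used check, compilable with any C
compiler. The fake_se struct, the slice_used() helper and the numbers
are made-up stand-ins for illustration; only the comparison mirrors
__entity_slice_used() in the hunk that follows.

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-in for the two sched_entity runtime counters (ns). */
struct fake_se {
	unsigned long long sum_exec_runtime;      /* total runtime so far */
	unsigned long long prev_sum_exec_runtime; /* snapshot at last pick */
};

/* Same comparison as __entity_slice_used(): runtime since last pick vs slice. */
static bool slice_used(const struct fake_se *se, unsigned long long slice_ns)
{
	return (se->sum_exec_runtime - se->prev_sum_exec_runtime) > slice_ns;
}

int main(void)
{
	/* Task has run 6ms since it was last picked; assume a 4ms slice. */
	struct fake_se se = {
		.sum_exec_runtime      = 106000000ULL,
		.prev_sum_exec_runtime = 100000000ULL,
	};

	printf("slice used up: %s\n", slice_used(&se, 4000000ULL) ? "yes" : "no");
	return 0;
}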
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ae17507533a0..49fb93296e35 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10613,6 +10613,40 @@ static void rq_offline_fair(struct rq *rq)
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_CORE
+static inline bool
+__entity_slice_used(struct sched_entity *se)
+{
+	return (se->sum_exec_runtime - se->prev_sum_exec_runtime) >
+		sched_slice(cfs_rq_of(se), se);
+}
+
+/*
+ * If the runqueue has only one task which used up its slice, and the sibling
+ * is forced idle, trigger a schedule to give the forced-idle task a chance.
+ */
+static void resched_forceidle_sibling(struct rq *rq, struct sched_entity *se)
+{
+	int cpu = cpu_of(rq), sibling_cpu;
+
+	if (rq->cfs.nr_running > 1 || !__entity_slice_used(se))
+		return;
+
+	for_each_cpu(sibling_cpu, cpu_smt_mask(cpu)) {
+		struct rq *sibling_rq;
+		if (sibling_cpu == cpu)
+			continue;
+		if (cpu_is_offline(sibling_cpu))
+			continue;
+
+		sibling_rq = cpu_rq(sibling_cpu);
+		if (sibling_rq->core_forceidle) {
+			resched_curr(sibling_rq);
+		}
+	}
+}
+#endif
+
/*
* scheduler tick hitting a task of our scheduling class.
*
@@ -10636,6 +10670,11 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_misfit_status(curr, rq);
update_overutilized_status(task_rq(curr));
+
+#ifdef CONFIG_SCHED_CORE
+	if (sched_core_enabled(rq))
+		resched_forceidle_sibling(rq, &curr->se);
+#endif
}
/*
--
2.17.1