[PATCH v3] sched/fair: Preempt if the current process is ineligible

From: Chunxin Zang
Date: Thu Jun 13 2024 - 09:17:18 EST


I found that some tasks can run long enough to become ineligible, yet
they still do not release the CPU. This increases the scheduling delay
of other processes. Therefore, check in update_curr() whether the
current process has become ineligible and, if so, trigger preemption.
This modification only takes effect when RUN_TO_PARITY is disabled, so
as not to defeat the original intent of RUN_TO_PARITY.
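
For context, "ineligible" refers to the EEVDF eligibility test used by
entity_eligible(): an entity is eligible while its vruntime is at or
behind the load-weighted average vruntime of the runqueue (i.e. its lag
is non-negative), and ineligible once it has run ahead of that average.
Below is a minimal stand-alone sketch of that idea; it is illustrative
only and not the kernel implementation (the struct and helper names are
made up for the example):

/* Stand-alone illustration of EEVDF eligibility, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct entity { unsigned long weight; long long vruntime; };

/* load-weighted average vruntime over all queued entities */
static long long weighted_avg_vruntime(const struct entity *e, int n)
{
	long long sum = 0;
	unsigned long total = 0;

	for (int i = 0; i < n; i++) {
		sum += (long long)e[i].weight * e[i].vruntime;
		total += e[i].weight;
	}
	return total ? sum / (long long)total : 0;
}

/* eligible == this entity's vruntime has not run ahead of the average */
static bool entity_eligible_sketch(const struct entity *e, int n, int i)
{
	return e[i].vruntime <= weighted_avg_vruntime(e, n);
}

int main(void)
{
	struct entity rq[] = { { 1024, 100 }, { 1024, 200 }, { 512, 400 } };

	for (int i = 0; i < 3; i++)
		printf("entity %d eligible: %d\n", i,
		       entity_eligible_sketch(rq, 3, i));
	return 0;
}

The patch below only acts on this state: when the current entity is
found ineligible in update_curr() (and RUN_TO_PARITY is off), it is
asked to reschedule instead of running on.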

I have pasted some test results below. I isolated four cores for
testing, ran hackbench in the background, and observed the latencies
reported by cyclictest (in microseconds).

hackbench -g 4 -l 100000000 &
cyclictest --mlockall -D 5m -q
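
(For reference, the *-NO_PARITY configurations can normally be
reproduced by toggling the scheduler feature at run time via debugfs,
assuming debugfs is mounted at the usual location:

echo NO_RUN_TO_PARITY > /sys/kernel/debug/sched/features
echo RUN_TO_PARITY    > /sys/kernel/debug/sched/features

The first command disables RUN_TO_PARITY, the second re-enables it.)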

                             EEVDF      EEVDF-NO_PARITY  PATCH-NO_PARITY

            # Min Latencies: 00006      00006            00006
LNICE(-19)  # Avg Latencies: 00191      00089            00065
            # Max Latencies: 15442      14133            11893

            # Min Latencies: 00006      00006            00005
LNICE(0)    # Avg Latencies: 00466      00289            00245
            # Max Latencies: 38917      32665            20238

            # Min Latencies: 00019      00010            00008
LNICE(19)   # Avg Latencies: 37151      18293            23177
            # Max Latencies: 2688299    426196           1953298

Signed-off-by: Chunxin Zang <zangchunxin@xxxxxxxxxxx>
Reviewed-by: Chen Yang <yangchen11@xxxxxxxxxxx>

------
Changes in v3:
- Move the evaluation of whether the current process is ineligible
into update_curr().
- Update the commit message

Changes in v2:
- Make the logic that treats the current process as ineligible and
triggers preemption take effect only when NO_RUN_TO_PARITY is enabled.
- Update the commit message
---
kernel/sched/fair.c | 28 +++++++++++++++++++++-------
1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 03be0d1330a6..21ef610ddb14 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -745,6 +745,15 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return vruntime_eligible(cfs_rq, se->vruntime);
 }
 
+static bool check_entity_need_preempt(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	if (sched_feat(RUN_TO_PARITY) || cfs_rq->nr_running <= 1 ||
+	    entity_eligible(cfs_rq, se))
+		return false;
+
+	return true;
+}
+
 static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
 {
 	u64 min_vruntime = cfs_rq->min_vruntime;
@@ -974,11 +983,13 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
 /*
  * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
  * this is probably good enough.
+ *
+ * Return true if se needs to be preempted.
  */
-static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if ((s64)(se->vruntime - se->deadline) < 0)
-		return;
+		return false;
 
 	/*
 	 * For EEVDF the virtual time slope is determined by w_i (iow.
@@ -995,10 +1006,7 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	/*
 	 * The task has consumed its request, reschedule.
 	 */
-	if (cfs_rq->nr_running > 1) {
-		resched_curr(rq_of(cfs_rq));
-		clear_buddies(cfs_rq, se);
-	}
+	return true;
 }
 
 #include "pelt.h"
@@ -1157,6 +1165,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq->curr;
 	s64 delta_exec;
+	bool need_preempt;
 
 	if (unlikely(!curr))
 		return;
@@ -1166,12 +1175,17 @@ static void update_curr(struct cfs_rq *cfs_rq)
 		return;
 
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
-	update_deadline(cfs_rq, curr);
+	need_preempt = update_deadline(cfs_rq, curr);
 	update_min_vruntime(cfs_rq);
 
 	if (entity_is_task(curr))
 		update_curr_task(task_of(curr), delta_exec);
 
+	if (need_preempt || check_entity_need_preempt(cfs_rq, curr)) {
+		resched_curr(rq_of(cfs_rq));
+		clear_buddies(cfs_rq, curr);
+	}
+
 	account_cfs_rq_runtime(cfs_rq, delta_exec);
 }

--
2.34.1