[PATCH v2 22/35] sched: default preemption policy for PREEMPT_AUTO
From: Ankur Arora
Date: Mon May 27 2024 - 20:40:06 EST
Add resched_opt_translate(), which determines the need-resched flag
to set based on the preemption policy in effect:

Preemption models other than PREEMPT_AUTO: continue to use
tif_resched(RESCHED_NOW).

PREEMPT_AUTO: use tif_resched(RESCHED_LAZY) to defer rescheduling
to the next exit-to-user.
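
For illustration, the caller-visible effect (sketch only, not part of
this patch; tif_resched() and TIF_NEED_RESCHED_LAZY are introduced
earlier in this series, and timeslice_expired() is a made-up caller):

	/* Sketch only, not part of this patch. */
	static void timeslice_expired(struct rq *rq)
	{
		/*
		 * Callers keep using resched_curr(). With
		 * CONFIG_PREEMPT_AUTO=n this sets tif_resched(RESCHED_NOW)
		 * (TIF_NEED_RESCHED), so the task is preempted at the next
		 * preemption point. With CONFIG_PREEMPT_AUTO=y it sets
		 * tif_resched(RESCHED_LAZY) (TIF_NEED_RESCHED_LAZY), which
		 * is only acted on at the next exit to user.
		 */
		resched_curr(rq);
	}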
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Originally-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/lkml/87jzshhexi.ffs@tglx/
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
kernel/sched/core.c | 30 ++++++++++++++++++++++++------
kernel/sched/sched.h | 12 +++++++++++-
2 files changed, 35 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 27b908cc9134..ee846dc9133b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1032,20 +1032,38 @@ void wake_up_q(struct wake_q_head *head)
}
/*
- * resched_curr - mark rq's current task 'to be rescheduled now'.
+ * For preemption models other than PREEMPT_AUTO: always schedule
+ * eagerly.
*
- * On UP this means the setting of the need_resched flag, on SMP it
- * might also involve a cross-CPU call to trigger the scheduler on
- * the target CPU.
+ * For PREEMPT_AUTO: allow the running task to finish its time quantum, and
+ * mark for rescheduling at the next exit to user.
*/
-void resched_curr(struct rq *rq)
+static resched_t resched_opt_translate(struct task_struct *curr,
+ enum resched_opt opt)
+{
+ if (!IS_ENABLED(CONFIG_PREEMPT_AUTO))
+ return RESCHED_NOW;
+
+ return RESCHED_LAZY;
+}
+
+/*
+ * __resched_curr - mark rq's current task 'to be rescheduled'.
+ *
+ * On UP this means the setting of the appropriate need_resched flag.
+ * On SMP it might also involve a cross-CPU call to trigger
+ * the scheduler on the target CPU.
+ */
+void __resched_curr(struct rq *rq, enum resched_opt opt)
{
struct task_struct *curr = rq->curr;
- resched_t rs = RESCHED_NOW;
+ resched_t rs;
int cpu;
lockdep_assert_rq_held(rq);
+ rs = resched_opt_translate(curr, opt);
+
/*
* TIF_NEED_RESCHED is the higher priority bit, so if it is already
* set, nothing more to be done.
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9239c0b0095..7013bd054a2f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2464,7 +2464,17 @@ extern void init_sched_fair_class(void);
extern void reweight_task(struct task_struct *p, int prio);
-extern void resched_curr(struct rq *rq);
+enum resched_opt {
+ RESCHED_DEFAULT,
+};
+
+extern void __resched_curr(struct rq *rq, enum resched_opt opt);
+
+static inline void resched_curr(struct rq *rq)
+{
+ __resched_curr(rq, RESCHED_DEFAULT);
+}
+
extern void resched_cpu(int cpu);
extern struct rt_bandwidth def_rt_bandwidth;
--
2.31.1