[PATCH 8/9] irqentry: define irqentry_exit_allow_resched()

From: Ankur Arora
Date: Mon Apr 03 2023 - 01:24:03 EST


Allow threads marked TIF_ALLOW_RESCHED to be rescheduled at irq exit.

This is only necessary under !preempt_model_preemptible(), for which
we reuse the same logic as irqentry_exit_cond_resched().
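
With this, a thread executing a long loop in kernel context can opt in
to being rescheduled from irq exit instead of sprinkling explicit
cond_resched() calls. A rough usage sketch (assuming the
allow_resched()/disallow_resched() helpers added earlier in this
series; the loop itself is illustrative only):

	allow_resched();		/* set TIF_ALLOW_RESCHED */
	for (i = 0; i < npages; i++)	/* long-running kernel loop */
		clear_page(addr[i]);
	disallow_resched();		/* clear TIF_ALLOW_RESCHED */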

Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
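[Note, not part of the commit message: the reschedule decision at irq
exit now reduces to roughly the following sketch, where
resched_allowed() is the TIF_ALLOW_RESCHED test from earlier in the
series:

	if (preempt_model_preemptible()) {
		/* FULL: reschedule any task with need_resched() set */
		irqentry_exit_cond_resched();
	} else if (resched_allowed()) {
		/* NONE/VOLUNTARY: only tasks that opted in */
		irqentry_exit_allow_resched();
	}

The actual code keeps the two checks separate, as in the hunks below.]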
kernel/entry/common.c | 8 ++++++++
kernel/sched/core.c | 36 +++++++++++++++++++++---------------
2 files changed, 29 insertions(+), 15 deletions(-)
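
The __alias attribute makes irqentry_exit_allow_resched() share the
body of raw_irqentry_exit_cond_resched() rather than duplicating it;
it is roughly equivalent to this wrapper, minus the extra call:

	void irqentry_exit_allow_resched(void)
	{
		raw_irqentry_exit_cond_resched();
	}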

diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index be61332c66b5..f1005595ebe7 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -390,6 +390,9 @@ void raw_irqentry_exit_cond_resched(void)
 			preempt_schedule_irq();
 	}
 }
+
+void irqentry_exit_allow_resched(void) __alias(raw_irqentry_exit_cond_resched);
+
 #ifdef CONFIG_PREEMPT_DYNAMIC
 #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
@@ -431,6 +434,11 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 		instrumentation_begin();
 		if (IS_ENABLED(CONFIG_PREEMPTION))
 			irqentry_exit_cond_resched();
+		/*
+		 * Only matters under PREEMPT_DYNAMIC with a !preemptible model.
+		 */
+		if (unlikely(!preempt_model_preemptible() && resched_allowed()))
+			irqentry_exit_allow_resched();
 
 		/* Covers both tracing and lockdep */
 		trace_hardirqs_on();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d18c3969f90..11845a91b691 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6500,6 +6500,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  *     - explicit schedule() call
  *     - return from syscall or exception to user-space
  *     - return from interrupt-handler to user-space
+ *     - return from interrupt-handler, with the task having set
+ *       TIF_ALLOW_RESCHED
  *
  * WARNING: must be called with preemption disabled!
  */
@@ -8597,28 +8599,32 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
  * SC:preempt_schedule
  * SC:preempt_schedule_notrace
  * SC:irqentry_exit_cond_resched
+ * SC:irqentry_exit_allow_resched
  *
  *
  * NONE:
- *   cond_resched               <- __cond_resched
- *   might_resched              <- RET0
- *   preempt_schedule           <- NOP
- *   preempt_schedule_notrace   <- NOP
- *   irqentry_exit_cond_resched <- NOP
+ *   cond_resched                <- __cond_resched
+ *   might_resched               <- RET0
+ *   preempt_schedule            <- NOP
+ *   preempt_schedule_notrace    <- NOP
+ *   irqentry_exit_cond_resched  <- NOP
+ *   irqentry_exit_allow_resched <- irqentry_exit_allow_resched
  *
  * VOLUNTARY:
- *   cond_resched               <- __cond_resched
- *   might_resched              <- __cond_resched
- *   preempt_schedule           <- NOP
- *   preempt_schedule_notrace   <- NOP
- *   irqentry_exit_cond_resched <- NOP
+ *   cond_resched                <- __cond_resched
+ *   might_resched               <- __cond_resched
+ *   preempt_schedule            <- NOP
+ *   preempt_schedule_notrace    <- NOP
+ *   irqentry_exit_cond_resched  <- NOP
+ *   irqentry_exit_allow_resched <- irqentry_exit_allow_resched
  *
  * FULL:
- *   cond_resched               <- RET0
- *   might_resched              <- RET0
- *   preempt_schedule           <- preempt_schedule
- *   preempt_schedule_notrace   <- preempt_schedule_notrace
- *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ *   cond_resched                <- RET0
+ *   might_resched               <- RET0
+ *   preempt_schedule            <- preempt_schedule
+ *   preempt_schedule_notrace    <- preempt_schedule_notrace
+ *   irqentry_exit_cond_resched  <- irqentry_exit_cond_resched
+ *   irqentry_exit_allow_resched <- NOP
  */
 
 enum {
--
2.31.1