[PATCH 3/4] arm64: Implement IRQ exit preemption static call for dynamic preemption

From: Frederic Weisbecker
Date: Mon Sep 20 2021 - 19:38:25 EST

arm64 doesn't support generic entry yet, so the architecture's own IRQ
exit preemption path needs to be exposed through the relevant static
call, irqentry_exit_cond_resched.

Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Quentin Perret <qperret@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: James Morse <james.morse@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
arch/arm64/include/asm/preempt.h | 7 +++++++
arch/arm64/kernel/entry-common.c | 15 ++++++++++++---
2 files changed, 19 insertions(+), 3 deletions(-)
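
Note (not part of the patch): below is a minimal sketch of how the scheduler
side is expected to retarget this static call at boot, along the lines of the
preempt= boot parameter handling in kernel/sched/core.c. The function
sketch_sched_dynamic_update() and the disabled stub are illustrative names
only; static_call_update() and the irqentry_exit_cond_resched key are the
real interfaces this series relies on.

#include <linux/static_call.h>

/* Provided by arch/arm64 (this patch) when CONFIG_PREEMPT_DYNAMIC=y: */
void arm64_preempt_schedule_irq(void);

/* Hypothetical no-op target used when IRQ exit preemption is disabled. */
static void irqentry_exit_cond_resched_disabled(void) { }

static void sketch_sched_dynamic_update(bool full)
{
	if (full)
		/* preempt=full: preempt on IRQ exit from kernel mode. */
		static_call_update(irqentry_exit_cond_resched,
				   arm64_preempt_schedule_irq);
	else
		/* preempt=none/voluntary: make the preemption point a no-op. */
		static_call_update(irqentry_exit_cond_resched,
				   irqentry_exit_cond_resched_disabled);
}

Since arm64 does not select HAVE_STATIC_CALL at this point, the static call
falls back to a plain function pointer rather than patched branches, but the
call site in el1_interrupt() is the same either way.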

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index e83f0982b99c..4fbbe644532f 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -3,6 +3,7 @@
 #define __ASM_PREEMPT_H
 
 #include <linux/thread_info.h>
+#include <linux/static_call_types.h>
 
 #define PREEMPT_NEED_RESCHED	BIT(32)
 #define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
@@ -86,4 +87,10 @@ void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
 #endif /* CONFIG_PREEMPTION */
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+void arm64_preempt_schedule_irq(void);
+#define __irqentry_exit_cond_resched_func arm64_preempt_schedule_irq
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, __irqentry_exit_cond_resched_func);
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
 #endif /* __ASM_PREEMPT_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 32f9796c4ffe..f1c739dd874d 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/thread_info.h>
+#include <linux/static_call.h>
 
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
@@ -235,7 +236,7 @@ static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 	exit_to_kernel_mode(regs);
 }
 
-static void __sched arm64_preempt_schedule_irq(void)
+void __sched arm64_preempt_schedule_irq(void)
 {
 	lockdep_assert_irqs_disabled();
 
@@ -259,6 +260,9 @@ static void __sched arm64_preempt_schedule_irq(void)
 	if (system_capabilities_finalized())
 		preempt_schedule_irq();
 }
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_CALL(irqentry_exit_cond_resched, arm64_preempt_schedule_irq);
+#endif
 
 static void do_interrupt_handler(struct pt_regs *regs,
 				 void (*handler)(struct pt_regs *))
@@ -446,8 +450,13 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
 	 * preempt_count().
 	 */
 	if (IS_ENABLED(CONFIG_PREEMPTION) &&
-	    READ_ONCE(current_thread_info()->preempt_count) == 0)
-		arm64_preempt_schedule_irq();
+	    READ_ONCE(current_thread_info()->preempt_count) == 0) {
+#ifdef CONFIG_PREEMPT_DYNAMIC
+		static_call(irqentry_exit_cond_resched)();
+#else
+		arm64_preempt_schedule_irq();
+#endif
+	}
 
 	exit_el1_irq_or_nmi(regs);
 }
--
2.25.1