[PATCHv3 2/3] arm64: entry: refactor EL1 interrupt entry logic

From: Pingfan Liu
Date: Thu Sep 30 2021 - 09:17:50 EST


From: Mark Rutland <mark.rutland@xxxxxxx>

Currently we distinguish the IRQ and definitely-PNMI cases at entry/exit
time via the enter_el1_irq_or_nmi() and exit_el1_irq_or_nmi() helpers.
In subsequent patches we'll need to handle the two cases more distinctly
in the body of the exception handler.
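
For background (illustration only, not part of this patch): the
distinction hinges on interrupts_enabled(regs), which reports whether
the interrupted context had regular interrupts unmasked. An interrupt
taken from a context where they were masked can only have been delivered
as a pseudo-NMI. A minimal sketch of that test, using a hypothetical
was_el1_pnmi() helper:

/*
 * Hypothetical helper (illustration only), mirroring the condition used
 * by the removed enter/exit helpers and by the new dispatcher below.
 */
static bool was_el1_pnmi(struct pt_regs *regs)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       !interrupts_enabled(regs);
}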

To make this possible, this patch refactors el1_interrupt() into a
top-level dispatcher to separate handlers for the IRQ and PNMI cases,
removing the need for the enter_el1_irq_or_nmi() and
exit_el1_irq_or_nmi() helpers.

Note that since arm64_enter_nmi() calls __nmi_enter(), which increments
preempt_count, we can never preempt while handling a PNMI. We now only
check for preemption in the IRQ case, which makes this clearer.
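
To make that concrete (illustration only, not part of this patch):
__nmi_enter() raises preempt_count -- roughly by NMI_OFFSET plus
HARDIRQ_OFFSET -- so a check like the hypothetical irq_may_preempt()
below, which mirrors the test in __el1_interrupt(), can never pass
between arm64_enter_nmi() and arm64_exit_nmi():

/*
 * Hypothetical helper (illustration only): within a PNMI, preempt_count
 * is already non-zero thanks to __nmi_enter(), so this always returns
 * false there; only the IRQ path can ever preempt.
 */
static bool irq_may_preempt(void)
{
	return IS_ENABLED(CONFIG_PREEMPTION) &&
	       READ_ONCE(current_thread_info()->preempt_count) == 0;
}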

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Cc: "Paul E. McKenney" <paulmck@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Marc Zyngier <maz@xxxxxxxxxx>
Cc: Joey Gouly <joey.gouly@xxxxxxx>
Cc: Sami Tolvanen <samitolvanen@xxxxxxxxxx>
Cc: Julien Thierry <julien.thierry@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Yuichi Ito <ito-yuichi@xxxxxxxxxxx>
Cc: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
To: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
---
arch/arm64/kernel/entry-common.c | 44 ++++++++++++++++----------------
1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 32f9796c4ffe..5f1473319fb0 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -219,22 +219,6 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
lockdep_hardirqs_on(CALLER_ADDR0);
}

-static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
- arm64_enter_nmi(regs);
- else
- enter_from_kernel_mode(regs);
-}
-
-static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
- arm64_exit_nmi(regs);
- else
- exit_to_kernel_mode(regs);
-}
-
static void __sched arm64_preempt_schedule_irq(void)
{
lockdep_assert_irqs_disabled();
@@ -432,14 +416,19 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
}
}

-static void noinstr el1_interrupt(struct pt_regs *regs,
- void (*handler)(struct pt_regs *))
+static __always_inline void
+__el1_pnmi(struct pt_regs *regs, void (*handler)(struct pt_regs *))
{
- write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
-
- enter_el1_irq_or_nmi(regs);
+ arm64_enter_nmi(regs);
do_interrupt_handler(regs, handler);
+ arm64_exit_nmi(regs);
+}

+static __always_inline void
+__el1_interrupt(struct pt_regs *regs, void (*handler)(struct pt_regs *))
+{
+ enter_from_kernel_mode(regs);
+ do_interrupt_handler(regs, handler);
/*
* Note: thread_info::preempt_count includes both thread_info::count
* and thread_info::need_resched, and is not equivalent to
@@ -448,8 +437,19 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
if (IS_ENABLED(CONFIG_PREEMPTION) &&
READ_ONCE(current_thread_info()->preempt_count) == 0)
arm64_preempt_schedule_irq();
+ exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ __el1_pnmi(regs, handler);
+ else
+ __el1_interrupt(regs, handler);

- exit_el1_irq_or_nmi(regs);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
--
2.31.1