Re: [PATCH 09/10] arm64: entry: Use split preemption logic

From: Jinjie Ruan

Date: Tue Apr 07 2026 - 21:52:36 EST




On 2026/4/7 21:16, Mark Rutland wrote:
> The generic irqentry code now provides
> irqentry_exit_to_kernel_mode_preempt() and
> irqentry_exit_to_kernel_mode_after_preempt(), which can be used
> where architectures have different state requirements for involuntary
> preemption and exception return, as is the case on arm64.
>
> Use the new functions on arm64, aligning our exit to kernel mode logic
> with the style of our exit to user mode logic. This removes the need for
> the recently-added bodge in arch_irqentry_exit_need_resched(), and
> allows preemption to occur when returning from any exception taken from
> kernel mode, which is nicer for RT.
>
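
Just to confirm my understanding of how the split is meant to be used
(a rough sketch only; the wrapper name and the placeholder comments
below are illustrative, not the actual generic code):

	static void arch_exit_to_kernel_mode(struct pt_regs *regs,
					     irqentry_state_t state)
	{
		/* arch: enter a state where involuntary preemption is safe */
		irqentry_exit_to_kernel_mode_preempt(regs, state);

		/* arch: enter the state required for the exception return */
		irqentry_exit_to_kernel_mode_after_preempt(regs, state);
	}

On arm64 that means the first half runs with IRQs masked via
local_irq_disable(), and the second half runs with all of DAIF masked,
as in the arm64_exit_to_kernel_mode() hunk below.
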
> In an ideal world, we'd remove arch_irqentry_exit_need_resched(), and
> fold the conditionality directly into the architecture-specific entry
> code. That way all the logic necessary to avoid preempting from a
> pseudo-NMI could be constrained specifically to the EL1 IRQ/FIQ paths,
> avoiding redundant work for other exceptions, and making the flow a bit
> clearer. At present it looks like that would require a larger
> refactoring (e.g. for the PREEMPT_DYNAMIC logic), and so I've left that
> as-is for now.
>
> Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
> Cc: Andy Lutomirski <luto@xxxxxxxxxx>
> Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
> Cc: Jinjie Ruan <ruanjinjie@xxxxxxxxxx>
> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> Cc: Thomas Gleixner <tglx@xxxxxxxxxx>
> Cc: Vladimir Murzin <vladimir.murzin@xxxxxxx>
> Cc: Will Deacon <will@xxxxxxxxxx>
> ---
>  arch/arm64/include/asm/entry-common.h | 21 ++++++++-------------
>  arch/arm64/kernel/entry-common.c      | 12 ++++--------
>  2 files changed, 12 insertions(+), 21 deletions(-)
>
> diff --git a/arch/arm64/include/asm/entry-common.h b/arch/arm64/include/asm/entry-common.h
> index 20f0a7c7bde15..cab8cd78f6938 100644
> --- a/arch/arm64/include/asm/entry-common.h
> +++ b/arch/arm64/include/asm/entry-common.h
> @@ -29,19 +29,14 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
>
>  static inline bool arch_irqentry_exit_need_resched(void)
>  {
> -	if (system_uses_irq_prio_masking()) {
> -		/*
> -		 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
> -		 * priority masking is used the GIC irqchip driver will clear DAIF.IF
> -		 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
> -		 * DAIF we must have handled an NMI, so skip preemption.
> -		 */
> -		if (read_sysreg(daif))
> -			return false;
> -	} else {
> -		if (read_sysreg(daif) & (PSR_D_BIT | PSR_A_BIT))
> -			return false;
> -	}
> +	/*
> +	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
> +	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
> +	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
> +	 * DAIF we must have handled an NMI, so skip preemption.
> +	 */
> +	if (system_uses_irq_prio_masking() && read_sysreg(daif))
> +		return false;
> 
>  	/*
>  	 * Preempting a task from an IRQ means we leave copies of PSTATE
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index 16a65987a6a9b..f42ce7b5c67f3 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -54,8 +54,11 @@ static noinstr irqentry_state_t arm64_enter_from_kernel_mode(struct pt_regs *reg
>  static void noinstr arm64_exit_to_kernel_mode(struct pt_regs *regs,
>  					      irqentry_state_t state)
>  {
> +	local_irq_disable();
> +	irqentry_exit_to_kernel_mode_preempt(regs, state);
> +	local_daif_mask();
>  	mte_check_tfsr_exit();
> -	irqentry_exit_to_kernel_mode(regs, state);
> +	irqentry_exit_to_kernel_mode_after_preempt(regs, state);
>  }

Reviewed-by: Jinjie Ruan <ruanjinjie@xxxxxxxxxx>

>
>  /*
> @@ -301,7 +304,6 @@ static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
>  	state = arm64_enter_from_kernel_mode(regs);
>  	local_daif_inherit(regs);
>  	do_mem_abort(far, esr, regs);
> -	local_daif_mask();
>  	arm64_exit_to_kernel_mode(regs, state);
>  }
>
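
With local_daif_mask() folded into arm64_exit_to_kernel_mode(), all of
these EL1 handlers converge on the same shape (sketch of the resulting
pattern; do_xxx() stands in for the per-exception helper):

	state = arm64_enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_xxx(...);
	arm64_exit_to_kernel_mode(regs, state);

which matches the style of the exit-to-user path, as the commit message
says.
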
> @@ -313,7 +315,6 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
>  	state = arm64_enter_from_kernel_mode(regs);
>  	local_daif_inherit(regs);
>  	do_sp_pc_abort(far, esr, regs);
> -	local_daif_mask();
>  	arm64_exit_to_kernel_mode(regs, state);
>  }
> 
> @@ -324,7 +325,6 @@ static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
>  	state = arm64_enter_from_kernel_mode(regs);
>  	local_daif_inherit(regs);
>  	do_el1_undef(regs, esr);
> -	local_daif_mask();
>  	arm64_exit_to_kernel_mode(regs, state);
>  }
> 
> @@ -335,7 +335,6 @@ static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
>  	state = arm64_enter_from_kernel_mode(regs);
>  	local_daif_inherit(regs);
>  	do_el1_bti(regs, esr);
> -	local_daif_mask();
>  	arm64_exit_to_kernel_mode(regs, state);
>  }
> 
> @@ -346,7 +345,6 @@ static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
>  	state = arm64_enter_from_kernel_mode(regs);
>  	local_daif_inherit(regs);
>  	do_el1_gcs(regs, esr);
> -	local_daif_mask();
>  	arm64_exit_to_kernel_mode(regs, state);
>  }
> 
> @@ -357,7 +355,6 @@ static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
>  	state = arm64_enter_from_kernel_mode(regs);
>  	local_daif_inherit(regs);
>  	do_el1_mops(regs, esr);
> -	local_daif_mask();
>  	arm64_exit_to_kernel_mode(regs, state);
>  }
> 
> @@ -423,7 +420,6 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
>  	state = arm64_enter_from_kernel_mode(regs);
>  	local_daif_inherit(regs);
>  	do_el1_fpac(regs, esr);
> -	local_daif_mask();
>  	arm64_exit_to_kernel_mode(regs, state);
>  }
>