[patch 11/46] genirq/chip: Prepare for code reduction

From: Thomas Gleixner
Date: Thu Mar 13 2025 - 12:02:54 EST


The interrupt flow handlers share similar code patterns for deciding whether
an incoming interrupt should be handled or not.

Provide common helper functions to allow removal of duplicated code.
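
For illustration only (not part of this change), the open-coded sequence which
repeats in handlers such as handle_level_irq() and handle_edge_irq() looks
roughly like this; the new irq_can_handle_actions() helper consolidates it:

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/* Disabled or no action installed: mark pending and bail out */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;	/* label differs per handler */
	}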

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
kernel/irq/chip.c | 37 ++++++++++++++++++++++++++++---------
1 file changed, 28 insertions(+), 9 deletions(-)

--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -499,7 +499,7 @@ static bool irq_check_poll(struct irq_de
 	return irq_wait_for_poll(desc);
 }
 
-static bool irq_may_run(struct irq_desc *desc)
+static bool irq_can_handle_pm(struct irq_desc *desc)
 {
 	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
 
@@ -524,6 +524,25 @@ static bool irq_may_run(struct irq_desc
 	return irq_check_poll(desc);
 }
 
+static inline bool irq_can_handle_actions(struct irq_desc *desc)
+{
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		return false;
+	}
+	return true;
+}
+
+static inline bool irq_can_handle(struct irq_desc *desc)
+{
+	if (!irq_can_handle_pm(desc))
+		return false;
+
+	return irq_can_handle_actions(desc);
+}
+
 /**
  * handle_simple_irq - Simple and software-decoded IRQs.
  * @desc: the interrupt description structure for this irq
@@ -539,7 +558,7 @@ void handle_simple_irq(struct irq_desc *
 {
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -574,7 +593,7 @@ void handle_untracked_irq(struct irq_des
 {
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -630,7 +649,7 @@ void handle_level_irq(struct irq_desc *d
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -695,7 +714,7 @@ void handle_fasteoi_irq(struct irq_desc
 	 * can arrive on the new CPU before the original CPU has completed
 	 * handling the previous one - it may need to be resent.
 	 */
-	if (!irq_may_run(desc)) {
+	if (!irq_can_handle_pm(desc)) {
 		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
 			desc->istate |= IRQS_PENDING;
 		goto out;
@@ -790,7 +809,7 @@ void handle_edge_irq(struct irq_desc *de

 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
-	if (!irq_may_run(desc)) {
+	if (!irq_can_handle_pm(desc)) {
 		desc->istate |= IRQS_PENDING;
 		mask_ack_irq(desc);
 		goto out_unlock;
@@ -854,7 +873,7 @@ void handle_edge_eoi_irq(struct irq_desc

 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
-	if (!irq_may_run(desc)) {
+	if (!irq_can_handle_pm(desc)) {
 		desc->istate |= IRQS_PENDING;
 		goto out_eoi;
 	}
@@ -1213,7 +1232,7 @@ void handle_fasteoi_ack_irq(struct irq_d

 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -1265,7 +1284,7 @@ void handle_fasteoi_mask_irq(struct irq_
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (!irq_may_run(desc))
+	if (!irq_can_handle_pm(desc))
 		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);