[PATCH v3] genirq/cpuhotplug: Notify about interrupt affinity changes for offlined CPUs

From: Imran Khan

Date: Tue Jan 13 2026 - 09:37:35 EST


During CPU offlining, the interrupts affine to the outgoing CPU are
moved to another CPU, but this affinity change is not propagated
through irq_desc::affinity_notify (if a notifier is registered).
This can leave users of irq_set_affinity_notifier() with stale
affinity information.

Avoid this by scheduling the affinity change notification work for
interrupts that were affine to the CPU being offlined.
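
For reference, a typical user of this notification registers a
struct irq_affinity_notify via irq_set_affinity_notifier() and reacts
to the new mask in its ->notify() callback. A minimal sketch (the
foo_* names are hypothetical and not part of this patch):

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical per-queue state of an example driver */
struct foo_queue {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
};

static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct foo_queue *q = container_of(notify, struct foo_queue,
					   affinity_notify);

	/* React to the new effective affinity, e.g. remap per-CPU data */
	pr_info("IRQ %u now affine to %*pbl\n", q->irq,
		cpumask_pr_args(mask));
}

static void foo_affinity_release(struct kref *ref)
{
	/* Nothing dynamically allocated in this sketch */
}

static int foo_setup_notifier(struct foo_queue *q)
{
	q->affinity_notify.notify = foo_affinity_notify;
	q->affinity_notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}

Without this change such a ->notify() callback is never invoked when
the affinity changes as a result of CPU offlining, so the user's view
of the affinity goes stale.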

Since irq_set_affinity_locked() uses the same logic to schedule the
affinity change notification work, split that logic out into a
dedicated helper and use it in both places.
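
For context, the scheduled work runs irq_affinity_notify() in
kernel/irq/manage.c, which snapshots the current affinity under
desc->lock, invokes the user's ->notify() callback with it and then
drops the reference taken when the work was scheduled. Roughly
(simplified; the real handler also honours a pending affinity move):

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	cpumask_copy(cpumask, desc->irq_data.common->affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}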

Signed-off-by: Imran Khan <imran.f.khan@xxxxxxxxxx>
---
v2 -> v3:
- Address review comments from Thomas
- Removed RFC tag
v1 -> v2:
- Fix compilation error due to missed parenthesis around scoped_guard

 include/linux/interrupt.h |  2 ++
 kernel/irq/cpuhotplug.c   |  6 ++++--
 kernel/irq/manage.c       | 28 ++++++++++++++++++++--------
 3 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 266f2b39213a0..6f93bf524f728 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -383,6 +383,7 @@ irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
 unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 				       const struct irq_affinity *affd);
 
+extern void schedule_affinity_notify_work(struct irq_desc *desc);
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -445,6 +446,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 	return maxvec;
 }
 
+static inline void schedule_affinity_notify_work(struct irq_desc *desc) { }
 #endif /* CONFIG_SMP */
 
 /*
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 755346ea98196..c77a3b95fba85 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -177,9 +177,11 @@ void irq_migrate_all_off_this_cpu(void)
 		bool affinity_broken;
 
 		desc = irq_to_desc(irq);
-		scoped_guard(raw_spinlock, &desc->lock)
+		scoped_guard(raw_spinlock, &desc->lock) {
 			affinity_broken = migrate_one_irq(desc);
-
+			if (affinity_broken && desc->affinity_notify)
+				schedule_affinity_notify_work(desc);
+		}
 		if (affinity_broken) {
 			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					     irq, smp_processor_id());
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 349ae7979da0e..6440a64f65a7b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -139,6 +139,23 @@ void synchronize_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(synchronize_irq);
 
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
+/**
+ * schedule_affinity_notify_work - Schedule irq affinity change notification
+ * @desc:	irq descriptor whose affinity changed
+ *
+ * Schedule the work which notifies the registered affinity notifier about
+ * the affinity change. Context: caller must hold desc->lock.
+ */
+void schedule_affinity_notify_work(struct irq_desc *desc)
+{
+	kref_get(&desc->affinity_notify->kref);
+	if (!schedule_work(&desc->affinity_notify->work))
+		/* Work was already scheduled, drop our extra ref */
+		kref_put(&desc->affinity_notify->kref,
+			 desc->affinity_notify->release);
+}
+EXPORT_SYMBOL_GPL(schedule_affinity_notify_work);
+
@@ -367,14 +384,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 		irq_copy_pending(desc, mask);
 	}
 
-	if (desc->affinity_notify) {
-		kref_get(&desc->affinity_notify->kref);
-		if (!schedule_work(&desc->affinity_notify->work)) {
-			/* Work was already scheduled, drop our extra ref */
-			kref_put(&desc->affinity_notify->kref,
-				 desc->affinity_notify->release);
-		}
-	}
+	if (desc->affinity_notify)
+		schedule_affinity_notify_work(desc);
+
 	irqd_set(data, IRQD_AFFINITY_SET);
 
 	return ret;

base-commit: f8f9c1f4d0c7a64600e2ca312dec824a0bc2f1da
--
2.34.1