[PATCH RT 03/19] genirq: Do not invoke the affinity callback via a workqueue on RT

From: zanussi
Date: Thu Aug 08 2019 - 15:54:44 EST


From: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>

v4.14.137-rt65-rc1 stable review patch.
If anyone has any objections, please let me know.

-----------


[ Upstream commit 2122adbe011cdc0eb62ad62494e181005b23c76a ]

Joe Korty reported that __irq_set_affinity_locked() schedules a work
item while holding a raw spinlock, which results in a might_sleep()
warning.
Queue the affinity notification via kthread_schedule_work() instead, so
the notifier callback runs in the process context of a kthread worker.
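
For illustration only (not part of the patch), below is a minimal
sketch of the kthread_worker pattern the RT side switches to. The
demo_* names are made up, and it uses the mainline
kthread_create_worker()/kthread_queue_work() API; the RT tree's
kthread_schedule_work() is assumed to queue onto a pre-created, shared
worker. The point is that queueing only wakes the worker thread, while
the callback runs later in that thread's process context, where
sleeping is allowed:

/* Hypothetical example, not from this patch: kthread_worker usage. */
#include <linux/kthread.h>
#include <linux/module.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;

/* Runs in the worker thread, i.e. in a context that may sleep. */
static void demo_work_fn(struct kthread_work *work)
{
	pr_info("notification handled in process context\n");
}

static int __init demo_init(void)
{
	demo_worker = kthread_create_worker(0, "demo_notify");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);

	kthread_init_work(&demo_work, demo_work_fn);

	/* Queue the work and wake the worker; demo_work_fn() runs later. */
	kthread_queue_work(demo_worker, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_flush_work(&demo_work);
	kthread_destroy_worker(demo_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The CONFIG_PREEMPT_RT_BASE hunks below follow the same split:
kthread_init_work() at notifier registration time in
irq_set_affinity_notifier(), and a queue-and-wake in
irq_set_affinity_locked() when the affinity changes.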

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Signed-off-by: Tom Zanussi <zanussi@xxxxxxxxxx>

Conflicts:
include/linux/interrupt.h
kernel/irq/manage.c
---
 include/linux/interrupt.h |  5 ++---
 kernel/irq/manage.c       | 19 ++++---------------
 2 files changed, 6 insertions(+), 18 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0f25fa19b2d8..233e3c027f53 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -15,7 +15,7 @@
 #include <linux/hrtimer.h>
 #include <linux/kref.h>
 #include <linux/workqueue.h>
-#include <linux/swork.h>
+#include <linux/kthread.h>
 
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
@@ -230,7 +230,6 @@ extern void resume_device_irqs(void);
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
  * @irq:		Interrupt to which notification applies
  * @kref:		Reference count, for internal use
- * @swork:		Swork item, for internal use
  * @work:		Work item, for internal use
  * @notify:		Function to be called on change. This will be
  *			called in process context.
@@ -243,7 +242,7 @@ struct irq_affinity_notify {
 	unsigned int irq;
 	struct kref kref;
 #ifdef CONFIG_PREEMPT_RT_BASE
-	struct swork_event swork;
+	struct kthread_work work;
 #else
 	struct work_struct work;
 #endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f9415590661c..3d5b33fe874b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -228,7 +228,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 		kref_get(&desc->affinity_notify->kref);
 
 #ifdef CONFIG_PREEMPT_RT_BASE
-		swork_queue(&desc->affinity_notify->swork);
+		kthread_schedule_work(&desc->affinity_notify->work);
 #else
 		schedule_work(&desc->affinity_notify->work);
 #endif
@@ -293,21 +293,11 @@ static void _irq_affinity_notify(struct irq_affinity_notify *notify)
 }
 
 #ifdef CONFIG_PREEMPT_RT_BASE
-static void init_helper_thread(void)
-{
-	static int init_sworker_once;
-
-	if (init_sworker_once)
-		return;
-	if (WARN_ON(swork_get()))
-		return;
-	init_sworker_once = 1;
-}
 
-static void irq_affinity_notify(struct swork_event *swork)
+static void irq_affinity_notify(struct kthread_work *work)
 {
 	struct irq_affinity_notify *notify =
-		container_of(swork, struct irq_affinity_notify, swork);
+		container_of(work, struct irq_affinity_notify, work);
 	_irq_affinity_notify(notify);
 }
 
@@ -350,8 +340,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 		notify->irq = irq;
 		kref_init(&notify->kref);
 #ifdef CONFIG_PREEMPT_RT_BASE
-		INIT_SWORK(&notify->swork, irq_affinity_notify);
-		init_helper_thread();
+		kthread_init_work(&notify->work, irq_affinity_notify);
 #else
 		INIT_WORK(&notify->work, irq_affinity_notify);
 #endif
--
2.14.1