[RFC PATCH 1/3] workqueue: Add interface for user-defined workqueue lockdep map

From: Matthew Brost
Date: Tue Jul 30 2024 - 18:17:19 EST


Add an interface for a user-defined workqueue lockdep map. This is
useful when multiple workqueues are created for the same purpose, since
they can all share a single lockdep map, and it also avoids leaking a
lockdep map on each workqueue creation.

Add a new workqueue flag, WQ_USER_OWNED_LOCKDEP, which tells the core
that the user will set up the workqueue's lockdep map via the new
function wq_init_user_lockdep_map().
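
For illustration, a driver that creates several workqueues for the same
purpose could combine the flag and the helper roughly as follows. This
is only a sketch assuming CONFIG_LOCKDEP=y (the new declaration in
workqueue.h sits under #ifdef CONFIG_LOCKDEP); the "submit" naming, the
static key, and create_submit_wq() are made up for the example and are
not part of this patch:

  /* Hypothetical usage sketch, not part of this patch. */
  #include <linux/lockdep.h>
  #include <linux/workqueue.h>

  /* One lockdep map (and class key) shared by all submit workqueues. */
  static struct lock_class_key submit_wq_key;
  static struct lockdep_map submit_wq_lockdep_map =
          STATIC_LOCKDEP_MAP_INIT("(wq_completion)submit_wq", &submit_wq_key);

  static struct workqueue_struct *create_submit_wq(int id)
  {
          struct workqueue_struct *wq;

          /* Tell the core not to set up its own per-workqueue lockdep map. */
          wq = alloc_workqueue("submit_wq_%d", WQ_USER_OWNED_LOCKDEP, 0, id);
          if (!wq)
                  return NULL;

          /* Every instance reports against the shared, statically defined map. */
          wq_init_user_lockdep_map(wq, &submit_wq_lockdep_map);

          return wq;
  }

Since every workqueue created this way points at the same map, flush
dependencies are reported against a single lockdep class rather than
one class per instance, and no per-workqueue key or map is registered
that would otherwise have to be torn down.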

Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Lai Jiangshan <jiangshanlai@xxxxxxxxx>
Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
---
include/linux/workqueue.h |  3 +++
kernel/workqueue.c        | 44 ++++++++++++++++++++++++++++++++-------
2 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d9968bfc8eac..3e6db0889e2b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -223,6 +223,8 @@ struct execute_work {
};

#ifdef CONFIG_LOCKDEP
+void wq_init_user_lockdep_map(struct workqueue_struct *wq,
+ struct lockdep_map *lockdep_map);
/*
* NB: because we have to copy the lockdep_map, setting _key
* here is required, otherwise it could get initialised to the
@@ -401,6 +403,7 @@ enum wq_flags {
* http://thread.gmane.org/gmane.linux.kernel/1480396
*/
WQ_POWER_EFFICIENT = 1 << 7,
+ WQ_USER_OWNED_LOCKDEP = 1 << 8, /* allow users to define lockdep map */

__WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3fbaecfc88c2..228b52b8d7c4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -366,7 +366,8 @@ struct workqueue_struct {
#ifdef CONFIG_LOCKDEP
char *lock_name;
struct lock_class_key key;
- struct lockdep_map lockdep_map;
+ struct lockdep_map __lockdep_map;
+ struct lockdep_map *lockdep_map;
#endif
char name[WQ_NAME_LEN]; /* I: workqueue name */

@@ -3220,7 +3221,7 @@ __acquires(&pool->lock)
lockdep_start_depth = lockdep_depth(current);
/* see drain_dead_softirq_workfn() */
if (!bh_draining)
- lock_map_acquire(&pwq->wq->lockdep_map);
+ lock_map_acquire(pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
/*
* Strictly speaking we should mark the invariant state without holding
@@ -3254,7 +3255,7 @@ __acquires(&pool->lock)
pwq->stats[PWQ_STAT_COMPLETED]++;
lock_map_release(&lockdep_map);
if (!bh_draining)
- lock_map_release(&pwq->wq->lockdep_map);
+ lock_map_release(pwq->wq->lockdep_map);

if (unlikely((worker->task && in_atomic()) ||
lockdep_depth(current) != lockdep_start_depth ||
@@ -3892,8 +3893,8 @@ static void touch_wq_lockdep_map(struct workqueue_struct *wq)
if (wq->flags & WQ_BH)
local_bh_disable();

- lock_map_acquire(&wq->lockdep_map);
- lock_map_release(&wq->lockdep_map);
+ lock_map_acquire(wq->lockdep_map);
+ lock_map_release(wq->lockdep_map);

if (wq->flags & WQ_BH)
local_bh_enable();
@@ -3927,7 +3928,8 @@ void __flush_workqueue(struct workqueue_struct *wq)
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
.flush_color = -1,
- .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
+ .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done,
+ (*wq->lockdep_map)),
};
int next_color;

@@ -4778,26 +4780,54 @@ static int init_worker_pool(struct worker_pool *pool)
}

#ifdef CONFIG_LOCKDEP
+/**
+ * wq_init_user_lockdep_map - init user lockdep map for workqueue
+ * @wq: workqueue to init lockdep map for
+ * @lockdep_map: lockdep map to use for workqueue
+ *
+ * Initialize the workqueue with a user-defined lockdep map.
+ * WQ_USER_OWNED_LOCKDEP must be set on the workqueue.
+ */
+void wq_init_user_lockdep_map(struct workqueue_struct *wq,
+ struct lockdep_map *lockdep_map)
+{
+ if (WARN_ON_ONCE(!(wq->flags & WQ_USER_OWNED_LOCKDEP)))
+ return;
+
+ wq->lockdep_map = lockdep_map;
+}
+EXPORT_SYMBOL_GPL(wq_init_user_lockdep_map);
+
static void wq_init_lockdep(struct workqueue_struct *wq)
{
char *lock_name;

+ if (wq->flags & WQ_USER_OWNED_LOCKDEP)
+ return;
+
lockdep_register_key(&wq->key);
lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
if (!lock_name)
lock_name = wq->name;

wq->lock_name = lock_name;
- lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+ wq->lockdep_map = &wq->__lockdep_map;
+ lockdep_init_map(wq->lockdep_map, lock_name, &wq->key, 0);
}

static void wq_unregister_lockdep(struct workqueue_struct *wq)
{
+ if (wq->flags & WQ_USER_OWNED_LOCKDEP)
+ return;
+
lockdep_unregister_key(&wq->key);
}

static void wq_free_lockdep(struct workqueue_struct *wq)
{
+ if (wq->flags & WQ_USER_OWNED_LOCKDEP)
+ return;
+
if (wq->lock_name != wq->name)
kfree(wq->lock_name);
}
--
2.34.1