[tip: locking/core] locking/mutex: Remove the list_head from struct mutex
From: tip-bot2 for Matthew Wilcox (Oracle)
Date: Mon Mar 09 2026 - 15:49:20 EST
The following commit has been merged into the locking/core branch of tip:
Commit-ID: 25500ba7e77ce9d3d9b5a1929d41a2ee2e23f6fe
Gitweb: https://git.kernel.org/tip/25500ba7e77ce9d3d9b5a1929d41a2ee2e23f6fe
Author: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
AuthorDate: Thu, 05 Mar 2026 19:55:43
Committer: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Sun, 08 Mar 2026 11:06:52 +01:00
locking/mutex: Remove the list_head from struct mutex
Instead of embedding a list_head in struct mutex, store a pointer to
the first waiter. The list of waiters remains a doubly linked list so
we can efficiently add to the tail of the list and remove from the front
(or middle) of the list.
Some of the list manipulation becomes more complicated, but it's a
reasonable tradeoff on the slow paths to shrink data structures which
embed a mutex like struct file.
Some of the debug checks have to be deleted because there is no equivalent
check in the new scheme (e.g., an empty waiter->list now means that it is
the only waiter, not that the waiter is no longer on the list).
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://patch.msgid.link/20260305195545.3707590-4-willy@xxxxxxxxxxxxx
---
include/linux/mutex.h | 2 +-
include/linux/mutex_types.h | 2 +-
kernel/locking/mutex-debug.c | 5 +----
kernel/locking/mutex.c | 49 +++++++++++++++++++----------------
kernel/locking/ww_mutex.h | 25 +++++-------------
5 files changed, 37 insertions(+), 46 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2f648ee..c471b12 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -79,7 +79,7 @@ do { \
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
- , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
+ , .first_waiter = NULL \
__DEBUG_MUTEX_INITIALIZER(lockname) \
__DEP_MAP_MUTEX_INITIALIZER(lockname) }
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index 8097593..a8f119f 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -44,7 +44,7 @@ context_lock_struct(mutex) {
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
- struct list_head wait_list;
+ struct mutex_waiter *first_waiter;
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 2c6b02d..94930d5 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -37,9 +37,8 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
lockdep_assert_held(&lock->wait_lock);
- DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
+ DEBUG_LOCKS_WARN_ON(!lock->first_waiter);
DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
- DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
@@ -62,7 +61,6 @@ void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
{
struct mutex *blocked_on = __get_task_blocked_on(task);
- DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
DEBUG_LOCKS_WARN_ON(waiter->task != task);
DEBUG_LOCKS_WARN_ON(blocked_on && blocked_on != lock);
@@ -74,7 +72,6 @@ void debug_mutex_unlock(struct mutex *lock)
{
if (likely(debug_locks)) {
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
- DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
}
}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index c867f6c..95f1822 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -47,7 +47,7 @@ static void __mutex_init_generic(struct mutex *lock)
{
atomic_long_set(&lock->owner, 0);
raw_spin_lock_init(&lock->wait_lock);
- INIT_LIST_HEAD(&lock->wait_list);
+ lock->first_waiter = NULL;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
#endif
@@ -194,33 +194,42 @@ static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
atomic_long_andnot(flag, &lock->owner);
}
-static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
-{
- return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
-}
-
/*
* Add @waiter to a given location in the lock wait_list and set the
* FLAG_WAITERS flag if it's the first waiter.
*/
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct list_head *list)
+ struct mutex_waiter *first)
{
hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
debug_mutex_add_waiter(lock, waiter, current);
- list_add_tail(&waiter->list, list);
- if (__mutex_waiter_is_first(lock, waiter))
+ if (!first)
+ first = lock->first_waiter;
+
+ if (first) {
+ list_add_tail(&waiter->list, &first->list);
+ } else {
+ INIT_LIST_HEAD(&waiter->list);
+ lock->first_waiter = waiter;
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
+ }
}
static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
- list_del(&waiter->list);
- if (likely(list_empty(&lock->wait_list)))
+ if (list_empty(&waiter->list)) {
__mutex_clear_flag(lock, MUTEX_FLAGS);
+ lock->first_waiter = NULL;
+ } else {
+ if (lock->first_waiter == waiter) {
+ lock->first_waiter = list_first_entry(&waiter->list,
+ struct mutex_waiter, list);
+ }
+ list_del(&waiter->list);
+ }
debug_mutex_remove_waiter(lock, waiter, current);
hung_task_clear_blocker();
@@ -340,7 +349,7 @@ bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
* Similarly, stop spinning if we are no longer the
* first waiter.
*/
- if (waiter && !__mutex_waiter_is_first(lock, waiter))
+ if (waiter && lock->first_waiter != waiter)
return false;
return true;
@@ -645,7 +654,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
if (!use_ww_ctx) {
/* add waiting tasks to the end of the waitqueue (FIFO): */
- __mutex_add_waiter(lock, &waiter, &lock->wait_list);
+ __mutex_add_waiter(lock, &waiter, NULL);
} else {
/*
* Add in stamp order, waking up waiters that must kill
@@ -691,7 +700,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
schedule_preempt_disabled();
- first = __mutex_waiter_is_first(lock, &waiter);
+ first = lock->first_waiter == &waiter;
/*
* As we likely have been woken up by task
@@ -734,8 +743,7 @@ acquired:
* Wound-Wait; we stole the lock (!first_waiter), check the
* waiters as anyone might want to wound us.
*/
- if (!ww_ctx->is_wait_die &&
- !__mutex_waiter_is_first(lock, &waiter))
+ if (!ww_ctx->is_wait_die && lock->first_waiter != &waiter)
__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
}
@@ -931,6 +939,7 @@ EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
struct task_struct *next = NULL;
+ struct mutex_waiter *waiter;
DEFINE_WAKE_Q(wake_q);
unsigned long owner;
unsigned long flags;
@@ -962,12 +971,8 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
raw_spin_lock_irqsave(&lock->wait_lock, flags);
debug_mutex_unlock(lock);
- if (!list_empty(&lock->wait_list)) {
- /* get the first entry from the wait-list: */
- struct mutex_waiter *waiter =
- list_first_entry(&lock->wait_list,
- struct mutex_waiter, list);
-
+ waiter = lock->first_waiter;
+ if (waiter) {
next = waiter->task;
debug_mutex_wake_waiter(lock, waiter);
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 31a785a..a0847e9 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -8,20 +8,14 @@
static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
- struct mutex_waiter *w;
-
- w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
- if (list_entry_is_head(w, &lock->wait_list, list))
- return NULL;
-
- return w;
+ return lock->first_waiter;
}
static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
w = list_next_entry(w, list);
- if (list_entry_is_head(w, &lock->wait_list, list))
+ if (lock->first_waiter == w)
return NULL;
return w;
@@ -31,7 +25,7 @@ static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
w = list_prev_entry(w, list);
- if (list_entry_is_head(w, &lock->wait_list, list))
+ if (lock->first_waiter == w)
return NULL;
return w;
@@ -40,22 +34,17 @@ __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
- struct mutex_waiter *w;
-
- w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
- if (list_entry_is_head(w, &lock->wait_list, list))
- return NULL;
+ struct mutex_waiter *w = lock->first_waiter;
+ if (w)
+ w = list_prev_entry(w, list);
return w;
}
static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
- struct list_head *p = &lock->wait_list;
- if (pos)
- p = &pos->list;
- __mutex_add_waiter(lock, waiter, p);
+ __mutex_add_waiter(lock, waiter, pos);
}
static inline struct task_struct *