[tip: locking/core] locking/rtmutex: Add context analysis

From: tip-bot2 for Peter Zijlstra

Date: Mon Mar 09 2026 - 15:51:34 EST


The following commit has been merged into the locking/core branch of tip:

Commit-ID: 90bb681dcdf7e69c90b56a18f06c0389a0810b92
Gitweb: https://git.kernel.org/tip/90bb681dcdf7e69c90b56a18f06c0389a0810b92
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Tue, 20 Jan 2026 18:17:50 +01:00
Committer: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Sun, 08 Mar 2026 11:06:53 +01:00

locking/rtmutex: Add context analysis

Annotate the rtmutex code for the compiler's context analysis: mark
rt_mutex_base::waiters and ::owner as __guarded_by(&wait_lock), add
__must_hold()/__acquires()/__releases() annotations to the helpers that
expect or hand over wait_lock, wrap the intentionally lockless
owner/waiter reads in data_race(), enable the analysis for rtmutex_api.o
and ww_rt_mutex.o, and let include/linux/rtmutex*.h emit warnings.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://patch.msgid.link/20260121111213.851599178@xxxxxxxxxxxxx
---
include/linux/rtmutex.h                  |  8 ++++----
kernel/locking/Makefile                  |  2 ++
kernel/locking/rtmutex.c                 | 18 +++++++++++++++++-
kernel/locking/rtmutex_api.c             |  2 ++
kernel/locking/rtmutex_common.h          | 27 +++++++++++++++++++--------
kernel/locking/ww_mutex.h                | 20 +++++++++++++++-----
kernel/locking/ww_rt_mutex.c             |  1 +
scripts/context-analysis-suppression.txt |  1 +
8 files changed, 61 insertions(+), 18 deletions(-)
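
The sketch below illustrates the pattern being applied, for readers new
to these annotations. It is a simplified stand-in (struct foo and its
helpers are invented for illustration), but the macros are the ones used
in the diff: with the analysis enabled for a translation unit (the
CONTEXT_ANALYSIS_<object>.o := y switches above), the compiler checks
that __guarded_by() members are only touched while the named lock is
held, and that callers of __must_hold() functions really hold it:

struct foo {
	raw_spinlock_t lock;
	/* The analysis requires ::lock to be held to touch ::counter. */
	int counter __guarded_by(&lock);
};

static void foo_inc(struct foo *f)
	__must_hold(&f->lock)	/* caller enters with f->lock held */
{
	f->counter++;		/* OK: f->lock is known to be held */
}

static void foo_inc_unlocked(struct foo *f)
{
	f->counter++;		/* warning: f->lock not held */
}

static void foo_use(struct foo *f)
{
	raw_spin_lock(&f->lock);	/* acquires the lock context */
	foo_inc(f);			/* OK */
	raw_spin_unlock(&f->lock);	/* releases it again */
}

This is why accessors such as rt_mutex_top_waiter() now carry
__must_hold(&lock->wait_lock): the waiter rb-tree is a guarded member,
so each helper documents -- and the compiler now checks -- its locking
contract. Where the analysis cannot see an aliased lock, e.g. through
container_of() as with rtm in the hunks below, __assume_ctx_lock()
asserts that it is held, and __no_context_analysis opts a function out
entirely.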

diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index ede4c6b..78e7e58 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -22,8 +22,8 @@ extern int max_lock_depth;

struct rt_mutex_base {
raw_spinlock_t wait_lock;
- struct rb_root_cached waiters;
- struct task_struct *owner;
+ struct rb_root_cached waiters __guarded_by(&wait_lock);
+ struct task_struct *owner __guarded_by(&wait_lock);
};

#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \
@@ -41,7 +41,7 @@ struct rt_mutex_base {
*/
static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
{
- return READ_ONCE(lock->owner) != NULL;
+ return data_race(READ_ONCE(lock->owner) != NULL);
}

#ifdef CONFIG_RT_MUTEXES
@@ -49,7 +49,7 @@ static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
- unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+ unsigned long owner = (unsigned long) data_race(READ_ONCE(lock->owner));

return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 264447d..0c07de7 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -4,6 +4,8 @@
KCOV_INSTRUMENT := n

CONTEXT_ANALYSIS_mutex.o := y
+CONTEXT_ANALYSIS_rtmutex_api.o := y
+CONTEXT_ANALYSIS_ww_rt_mutex.o := y

obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index c80902e..ccaba61 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -94,6 +94,7 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,

static __always_inline struct task_struct *
rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
+ __must_hold(&lock->wait_lock)
{
unsigned long val = (unsigned long)owner;

@@ -105,6 +106,7 @@ rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)

static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+ __must_hold(&lock->wait_lock)
{
/*
* lock->wait_lock is held but explicit acquire semantics are needed
@@ -114,12 +116,14 @@ rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
}

static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
/* lock->wait_lock is held so the unlock provides release semantics. */
WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
}

static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
lock->owner = (struct task_struct *)
((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
@@ -127,6 +131,7 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)

static __always_inline void
fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
+ __must_hold(&lock->wait_lock)
{
unsigned long owner, *p = (unsigned long *) &lock->owner;

@@ -328,6 +333,7 @@ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
}

static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
lock->owner = (struct task_struct *)
((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
@@ -1206,6 +1212,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
enum rtmutex_chainwalk chwalk,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
@@ -1249,6 +1256,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,

/* Check whether the waiter should back out immediately */
rtm = container_of(lock, struct rt_mutex, rtmutex);
+ __assume_ctx_lock(&rtm->rtmutex.wait_lock);
res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
if (res) {
raw_spin_lock(&task->pi_lock);
@@ -1356,6 +1364,7 @@ static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
}

static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
int ret = try_to_take_rt_mutex(lock, current, NULL);

@@ -1505,7 +1514,7 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
* - the VCPU on which owner runs is preempted
*/
if (!owner_on_cpu(owner) || need_resched() ||
- !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
+ !data_race(rt_mutex_waiter_is_top_waiter(lock, waiter))) {
res = false;
break;
}
@@ -1538,6 +1547,7 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
*/
static void __sched remove_waiter(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -1613,6 +1623,8 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
struct task_struct *owner;
int ret = 0;

+ __assume_ctx_lock(&rtm->rtmutex.wait_lock);
+
lockevent_inc(rtmutex_slow_block);
for (;;) {
/* Try to acquire the lock: */
@@ -1658,6 +1670,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
struct rt_mutex_base *lock,
struct rt_mutex_waiter *w)
+ __must_hold(&lock->wait_lock)
{
/*
* If the result is not -EDEADLOCK or the caller requested
@@ -1694,11 +1707,13 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
struct ww_mutex *ww = ww_container_of(rtm);
int ret;

+ __assume_ctx_lock(&rtm->rtmutex.wait_lock);
lockdep_assert_held(&lock->wait_lock);
lockevent_inc(rtmutex_slowlock);

@@ -1750,6 +1765,7 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state,
struct wake_q_head *wake_q)
+ __must_hold(&lock->wait_lock)
{
struct rt_mutex_waiter waiter;
int ret;
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 59dbd29..124219a 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -526,6 +526,7 @@ static __always_inline int __mutex_lock_common(struct mutex *lock,
unsigned int subclass,
struct lockdep_map *nest_lock,
unsigned long ip)
+ __acquires(lock) __no_context_analysis
{
int ret;

@@ -647,6 +648,7 @@ EXPORT_SYMBOL(mutex_trylock);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

void __sched mutex_unlock(struct mutex *lock)
+ __releases(lock) __no_context_analysis
{
mutex_release(&lock->dep_map, _RET_IP_);
__rt_mutex_unlock(&lock->rtmutex);
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index cf6ddd1..c38b7bd 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -79,12 +79,18 @@ struct rt_wake_q_head {
* PI-futex support (proxy locking functions, etc.):
*/
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
- struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
+ struct task_struct *proxy_owner)
+ __must_hold(&lock->wait_lock);
+
+extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock);
+
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
- struct wake_q_head *);
+ struct wake_q_head *)
+ __must_hold(&lock->wait_lock);
+
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
@@ -94,8 +100,9 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter);

-extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
-extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
+extern int rt_mutex_futex_trylock(struct rt_mutex_base *lock);
+extern int __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
@@ -109,6 +116,7 @@ extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
*/
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}
@@ -120,6 +128,7 @@ static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
*/
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
+ __must_hold(&lock->wait_lock)
{
struct rb_node *leftmost = rb_first_cached(&lock->waiters);

@@ -127,6 +136,7 @@ static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
+ __must_hold(&lock->wait_lock)
{
struct rb_node *leftmost = rb_first_cached(&lock->waiters);
struct rt_mutex_waiter *w = NULL;
@@ -170,9 +180,10 @@ enum rtmutex_chainwalk {

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
- raw_spin_lock_init(&lock->wait_lock);
- lock->waiters = RB_ROOT_CACHED;
- lock->owner = NULL;
+ scoped_guard (raw_spinlock_init, &lock->wait_lock) {
+ lock->waiters = RB_ROOT_CACHED;
+ lock->owner = NULL;
+ }
}

/* Debug functions */
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index c50ea5d..b1834ab 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -4,6 +4,7 @@

#define MUTEX mutex
#define MUTEX_WAITER mutex_waiter
+#define WAIT_LOCK wait_lock

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
@@ -86,9 +87,11 @@ static inline void lockdep_assert_wait_lock_held(struct mutex *lock)

#define MUTEX rt_mutex
#define MUTEX_WAITER rt_mutex_waiter
+#define WAIT_LOCK rtmutex.wait_lock

static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
if (!n)
@@ -116,6 +119,7 @@ __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)

static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
if (!n)
@@ -137,21 +141,25 @@ __ww_mutex_owner(struct rt_mutex *lock)

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
+ __acquires(&lock->rtmutex.wait_lock)
{
raw_spin_lock_irqsave(&lock->rtmutex.wait_lock, *flags);
}

static inline void unlock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
+ __releases(&lock->rtmutex.wait_lock)
{
raw_spin_unlock_irqrestore(&lock->rtmutex.wait_lock, *flags);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
+ __must_hold(&lock->rtmutex.wait_lock)
{
lockdep_assert_held(&lock->rtmutex.wait_lock);
}
@@ -304,7 +312,7 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
struct ww_acquire_ctx *ww_ctx,
struct ww_acquire_ctx *hold_ctx,
struct wake_q_head *wake_q)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct task_struct *owner = __ww_mutex_owner(lock);

@@ -369,7 +377,7 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
struct wake_q_head *wake_q)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct MUTEX_WAITER *cur;

@@ -396,6 +404,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
DEFINE_WAKE_Q(wake_q);
unsigned long flags;
+ bool has_waiters;

ww_mutex_lock_acquired(lock, ctx);

@@ -417,7 +426,8 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
* __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
* and/or !empty list.
*/
- if (likely(!__ww_mutex_has_waiters(&lock->base)))
+ has_waiters = data_race(__ww_mutex_has_waiters(&lock->base));
+ if (likely(!has_waiters))
return;

/*
@@ -463,7 +473,7 @@ __ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
struct ww_acquire_ctx *ctx)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
@@ -514,7 +524,7 @@ __ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
struct MUTEX *lock,
struct ww_acquire_ctx *ww_ctx,
struct wake_q_head *wake_q)
- __must_hold(&lock->wait_lock)
+ __must_hold(&lock->WAIT_LOCK)
{
struct MUTEX_WAITER *cur, *pos = NULL;
bool is_wait_die;
diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
index c7196de..e07fb3b 100644
--- a/kernel/locking/ww_rt_mutex.c
+++ b/kernel/locking/ww_rt_mutex.c
@@ -90,6 +90,7 @@ ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

void __sched ww_mutex_unlock(struct ww_mutex *lock)
+ __no_context_analysis
{
struct rt_mutex *rtm = &lock->base;

diff --git a/scripts/context-analysis-suppression.txt b/scripts/context-analysis-suppression.txt
index fd8951d..1c51b61 100644
--- a/scripts/context-analysis-suppression.txt
+++ b/scripts/context-analysis-suppression.txt
@@ -24,6 +24,7 @@ src:*include/linux/mutex*.h=emit
src:*include/linux/rcupdate.h=emit
src:*include/linux/refcount.h=emit
src:*include/linux/rhashtable.h=emit
+src:*include/linux/rtmutex*.h=emit
src:*include/linux/rwlock*.h=emit
src:*include/linux/rwsem.h=emit
src:*include/linux/sched*=emit
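
A note on the data_race() wrappers: once ::owner and ::waiters are
__guarded_by(&wait_lock), even a READ_ONCE() on a lockless fast path is
flagged, so reads that are intentionally racy -- as in
rt_mutex_base_is_locked() and rtmutex_spin_on_owner() above -- are
wrapped in data_race() to document the race and satisfy the analysis.
Schematically, again with the invented struct foo rather than kernel
code:

static inline bool foo_is_active(struct foo *f)
{
	/*
	 * Lockless fast-path read: READ_ONCE() copes with a concurrent
	 * writer, data_race() records that reading the __guarded_by()
	 * member without f->lock is intentional.
	 */
	return data_race(READ_ONCE(f->counter) != 0);
}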