[PATCH RFC 06/33] locking/mutex: Annotate struct mutex and mutex functions

From: Bart Van Assche
Date: Thu Feb 06 2025 - 12:53:28 EST


Annotate struct mutex as a capability and inform the Clang thread-safety
analyzer about mutex lock, trylock and unlock operations, including the
meaning of the return values of the functions that can fail to acquire a
mutex.
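
As an illustration (not part of this patch): with these annotations in place
and -Wthread-safety enabled, Clang flags call paths that leave a mutex held.
The structure and function below are made up for this example, and the
warning wording is approximate:

  #include <linux/errno.h>
  #include <linux/mutex.h>

  struct foo {				/* hypothetical example structure */
  	struct mutex lock;
  	int count;
  };

  static int foo_inc(struct foo *f)
  {
  	mutex_lock(&f->lock);	/* ACQUIRE(*lock): analyzer starts tracking the lock */
  	if (f->count < 0)
  		return -EINVAL;	/* warning: mutex 'f->lock' is still held at this return */
  	f->count++;
  	mutex_unlock(&f->lock);	/* RELEASE(*lock): lock no longer held */
  	return 0;
  }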

Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
include/linux/mutex.h | 46 +++++++++++++++++++++++--------------
include/linux/mutex_types.h | 5 ++--
kernel/locking/mutex.c | 12 ++++++++++
3 files changed, 44 insertions(+), 19 deletions(-)
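
Note for reviewers unfamiliar with the annotations used below:
<linux/thread_safety.h> is introduced earlier in this series. As a rough
sketch, the wrappers presumably expand to Clang's capability attributes
along the following lines when building with a thread-safety capable
compiler (the exact definitions and any config gating are assumptions, not
copied from the real header):

  #ifdef __clang__
  #define CAPABILITY(name)          __attribute__((capability(name)))
  #define ACQUIRE(...)              __attribute__((acquire_capability(__VA_ARGS__)))
  #define TRY_ACQUIRE(ret, ...)     __attribute__((try_acquire_capability(ret, __VA_ARGS__)))
  #define RELEASE(...)              __attribute__((release_capability(__VA_ARGS__)))
  #define ASSERT_CAPABILITY(x)      __attribute__((assert_capability(x)))
  #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
  #else
  #define CAPABILITY(name)
  #define ACQUIRE(...)
  #define TRY_ACQUIRE(ret, ...)
  #define RELEASE(...)
  #define ASSERT_CAPABILITY(x)
  #define NO_THREAD_SAFETY_ANALYSIS
  #endif

The first argument of TRY_ACQUIRE() names the return value that signals a
successful acquisition, which is why mutex_trylock() is annotated with 1
while the interruptible and killable variants are annotated with 0.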

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 6c0a8a843a29..b7669baeffe9 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -21,6 +21,7 @@
#include <linux/debug_locks.h>
#include <linux/cleanup.h>
#include <linux/mutex_types.h>
+#include <linux/thread_safety.h>

struct device;

@@ -154,14 +155,18 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
-extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+void mutex_lock_nested(struct mutex *lock, unsigned int subclass) ACQUIRE(*lock);
+void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
+ ACQUIRE(*lock);

extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
- unsigned int subclass);
+ unsigned int subclass)
+ TRY_ACQUIRE(0, *lock);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
- unsigned int subclass);
-extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
+ unsigned int subclass)
+ TRY_ACQUIRE(0, *lock);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+ ACQUIRE(*lock);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
@@ -175,10 +180,11 @@ do { \
} while (0)

#else
-extern void mutex_lock(struct mutex *lock);
-extern int __must_check mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check mutex_lock_killable(struct mutex *lock);
-extern void mutex_lock_io(struct mutex *lock);
+void mutex_lock(struct mutex *lock) ACQUIRE(*lock);
+int __must_check mutex_lock_interruptible(struct mutex *lock)
+ TRY_ACQUIRE(0, *lock);
+int __must_check mutex_lock_killable(struct mutex *lock) TRY_ACQUIRE(0, *lock);
+void mutex_lock_io(struct mutex *lock) ACQUIRE(*lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
@@ -193,13 +199,19 @@ extern void mutex_lock_io(struct mutex *lock);
*
* Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
-extern int mutex_trylock(struct mutex *lock);
-extern void mutex_unlock(struct mutex *lock);
-
-bool atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-
-DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
-DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
+int mutex_trylock(struct mutex *lock) TRY_ACQUIRE(1, *lock);
+void mutex_unlock(struct mutex *lock) RELEASE(*lock);
+
+bool atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+ TRY_ACQUIRE(true, *lock);
+
+DEFINE_GUARD_ATTR(mutex, struct mutex *,
+ mutex_lock(_T),
+ ASSERT_CAPABILITY(*_T) NO_THREAD_SAFETY_ANALYSIS,
+ mutex_unlock(_T))
+DEFINE_GUARD_COND_ATTR(mutex, _try, mutex_trylock(_T),
+ ASSERT_CAPABILITY(*_T) NO_THREAD_SAFETY_ANALYSIS)
+DEFINE_GUARD_COND_ATTR(mutex, _intr, mutex_lock_interruptible(_T) == 0,
+ ASSERT_CAPABILITY(*_T) NO_THREAD_SAFETY_ANALYSIS)

#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index fdf7f515fde8..59469956426a 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -6,6 +6,7 @@
#include <linux/lockdep_types.h>
#include <linux/osq_lock.h>
#include <linux/spinlock_types.h>
+#include <linux/thread_safety.h>
#include <linux/types.h>

#ifndef CONFIG_PREEMPT_RT
@@ -38,7 +39,7 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct mutex {
+struct CAPABILITY("mutex") mutex {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -59,7 +60,7 @@ struct mutex {
*/
#include <linux/rtmutex.h>

-struct mutex {
+struct CAPABILITY("mutex") mutex {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 0af175f5f031..e44becde2610 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -254,6 +254,7 @@ static void __sched __mutex_lock_slowpath(struct mutex *lock);
* This function is similar to (but not equivalent to) down().
*/
void __sched mutex_lock(struct mutex *lock)
+ NO_THREAD_SAFETY_ANALYSIS
{
might_sleep();

@@ -515,6 +516,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
* This function is similar to (but not equivalent to) up().
*/
void __sched mutex_unlock(struct mutex *lock)
+ NO_THREAD_SAFETY_ANALYSIS
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
if (__mutex_unlock_fast(lock))
@@ -536,6 +538,7 @@ EXPORT_SYMBOL(mutex_unlock);
* of a unlocked mutex is not allowed.
*/
void __sched ww_mutex_unlock(struct ww_mutex *lock)
+ NO_THREAD_SAFETY_ANALYSIS
{
__ww_mutex_unlock(lock);
mutex_unlock(&lock->base);
@@ -751,6 +754,7 @@ __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
+ NO_THREAD_SAFETY_ANALYSIS
{
if (!ww_ctx)
return mutex_trylock(&ww->base);
@@ -778,6 +782,7 @@ EXPORT_SYMBOL(ww_mutex_trylock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
+ NO_THREAD_SAFETY_ANALYSIS
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
@@ -786,6 +791,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+ NO_THREAD_SAFETY_ANALYSIS
{
__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
@@ -807,6 +813,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+ NO_THREAD_SAFETY_ANALYSIS
{
int token;

@@ -957,6 +964,7 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock);
* signal arrived.
*/
int __sched mutex_lock_interruptible(struct mutex *lock)
+ NO_THREAD_SAFETY_ANALYSIS
{
might_sleep();

@@ -981,6 +989,7 @@ EXPORT_SYMBOL(mutex_lock_interruptible);
* fatal signal arrived.
*/
int __sched mutex_lock_killable(struct mutex *lock)
+ NO_THREAD_SAFETY_ANALYSIS
{
might_sleep();

@@ -1061,6 +1070,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
* mutex must be released by the same task that acquired it.
*/
int __sched mutex_trylock(struct mutex *lock)
+ NO_THREAD_SAFETY_ANALYSIS
{
bool locked;

@@ -1077,6 +1087,7 @@ EXPORT_SYMBOL(mutex_trylock);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ NO_THREAD_SAFETY_ANALYSIS
{
might_sleep();

@@ -1092,6 +1103,7 @@ EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ NO_THREAD_SAFETY_ANALYSIS
{
might_sleep();